Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig34
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c423
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c150
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c549
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c328
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c211
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c479
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c473
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c248
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c711
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c358
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c230
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c219
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c141
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c397
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c288
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c380
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c359
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c271
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c365
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c311
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c415
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h184
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c658
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c496
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c365
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c378
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c4231
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c837
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c666
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c769
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c1216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c927
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c)31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c388
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c509
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c1767
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h777
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c507
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c158
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c1120
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.h139
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c571
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c1070
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c81
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c133
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c241
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c173
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c55
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c405
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c137
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c64
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c349
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.h84
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c92
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c39
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c105
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c445
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c88
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h73
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h360
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c344
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c156
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h22
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c425
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c338
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/soc15_int.h1
-rw-r--r--drivers/gpu/drm/amd/amdxcp/Makefile25
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c110
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h29
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c782
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h25
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c28
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c354
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c253
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c95
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c132
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c45
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c183
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h46
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c831
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c376
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c1149
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c370
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c278
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c298
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c174
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c185
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c170
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c247
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c236
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c663
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c592
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h167
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h231
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c113
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c229
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c253
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c292
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c274
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h29
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h605
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h183
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c18
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c67
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h35
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c16
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c85
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h28
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c5
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c5
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c48
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h5
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h192
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h1112
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h56
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h325
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h279
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h1019
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h24
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h177
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h428
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h2332
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h10919
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h32
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h62
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h72
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h24
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h30
-rw-r--r--drivers/gpu/drm/amd/include/yellow_carp_offset.h6
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c534
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h37
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c54
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c29
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h63
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c64
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c24
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c30
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h14
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c48
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c27
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c30
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c26
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c125
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h18
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c36
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h19
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h22
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h45
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h73
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h42
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h20
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c89
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h39
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h15
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h37
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c60
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c90
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c131
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c39
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c592
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c113
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c95
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c534
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c552
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c154
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h4
-rw-r--r--drivers/gpu/drm/arm/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c22
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c58
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c11
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h3
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c6
-rw-r--r--drivers/gpu/drm/armada/Kconfig2
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c2
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c6
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c6
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c6
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c66
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c43
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h105
-rw-r--r--drivers/gpu/drm/ast/ast_main.c307
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c137
-rw-r--r--drivers/gpu/drm/ast/ast_post.c77
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c25
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c6
-rw-r--r--drivers/gpu/drm/bridge/Kconfig20
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c11
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c122
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c3
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c80
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c9
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h2
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c2
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c2
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c1
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c1
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c2
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c1
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c132
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c1
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c6
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c1
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c2
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c79
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c5
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c40
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c2
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c3
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c37
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c58
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c30
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c63
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c14
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c31
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c39
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c45
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c58
-rw-r--r--drivers/gpu/drm/display/drm_hdcp_helper.c4
-rw-r--r--drivers/gpu/drm/drm_atomic.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c58
-rw-r--r--drivers/gpu/drm/drm_bridge.c46
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c13
-rw-r--r--drivers/gpu/drm/drm_buddy.c8
-rw-r--r--drivers/gpu/drm/drm_client.c21
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c6
-rw-r--r--drivers/gpu/drm/drm_connector.c197
-rw-r--r--drivers/gpu/drm/drm_debugfs.c42
-rw-r--r--drivers/gpu/drm/drm_drv.c8
-rw-r--r--drivers/gpu/drm/drm_edid.c32
-rw-r--r--drivers/gpu/drm/drm_exec.c333
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c12
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c17
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c11
-rw-r--r--drivers/gpu/drm/drm_gem.c75
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c216
-rw-r--r--drivers/gpu/drm/drm_gpuva_mgr.c1725
-rw-r--r--drivers/gpu/drm/drm_internal.h2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c5
-rw-r--r--drivers/gpu/drm/drm_managed.c30
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c17
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c3
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_panel.c218
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c2
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c12
-rw-r--r--drivers/gpu/drm/drm_prime.c83
-rw-r--r--drivers/gpu/drm/drm_syncobj.c154
-rw-r--r--drivers/gpu/drm/drm_sysfs.c23
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c190
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h13
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c63
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c5
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c6
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c10
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c7
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c1
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c8
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c23
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c3
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c139
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c188
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c345
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c117
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c153
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c371
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c72
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c620
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.h67
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c218
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c214
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h1
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c7
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c28
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h71
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c30
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c57
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c38
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c12
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c8
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c152
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h21
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c154
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_defines.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c108
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c33
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c103
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.c159
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c161
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c3
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h75
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c347
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c141
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c39
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c24
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c131
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_active.c99
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c25
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.h2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h116
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c60
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c94
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c69
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h53
-rw-r--r--drivers/gpu/drm/i915/i915_request.c17
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c43
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h7
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c15
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c194
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h11
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_step.c10
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c44
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h5
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c22
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h12
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c31
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c54
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c18
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c5
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c11
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c2
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c11
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c8
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c2
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c8
-rw-r--r--drivers/gpu/drm/loongson/Kconfig17
-rw-r--r--drivers/gpu/drm/loongson/Makefile22
-rw-r--r--drivers/gpu/drm/loongson/loongson_device.c102
-rw-r--r--drivers/gpu/drm/loongson/loongson_module.c33
-rw-r--r--drivers/gpu/drm/loongson/loongson_module.h12
-rw-r--r--drivers/gpu/drm/loongson/lsdc_benchmark.c133
-rw-r--r--drivers/gpu/drm/loongson/lsdc_benchmark.h13
-rw-r--r--drivers/gpu/drm/loongson/lsdc_crtc.c1024
-rw-r--r--drivers/gpu/drm/loongson/lsdc_debugfs.c110
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c456
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.h388
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c311
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.h37
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gfxpll.c199
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gfxpll.h52
-rw-r--r--drivers/gpu/drm/loongson/lsdc_i2c.c179
-rw-r--r--drivers/gpu/drm/loongson/lsdc_i2c.h29
-rw-r--r--drivers/gpu/drm/loongson/lsdc_irq.c74
-rw-r--r--drivers/gpu/drm/loongson/lsdc_irq.h16
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output.h21
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a1000.c178
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a2000.c552
-rw-r--r--drivers/gpu/drm/loongson/lsdc_pixpll.c481
-rw-r--r--drivers/gpu/drm/loongson/lsdc_pixpll.h86
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c793
-rw-r--r--drivers/gpu/drm/loongson/lsdc_probe.c56
-rw-r--r--drivers/gpu/drm/loongson/lsdc_probe.h12
-rw-r--r--drivers/gpu/drm/loongson/lsdc_regs.h406
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c593
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.h99
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c8
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c8
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_merge.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c366
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c105
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c16
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c8
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c6
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h1
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c1
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c5
-rw-r--r--drivers/gpu/drm/msm/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h1
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c15
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c2
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c7
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c9
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000c.h26
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/vmm.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c219
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c64
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h93
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c411
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.h54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c86
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c419
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.h127
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c1916
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.h108
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/intr.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c105
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c214
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c197
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c27
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c24
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c9
-rw-r--r--drivers/gpu/drm/panel/Kconfig23
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c3
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c1
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c7
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c2
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c2
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c1
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c2
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c2
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c3
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c2
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c196
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c3
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c1
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c2
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c36
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c1
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c8
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c1
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c2
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c2
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c3
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c1
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c1
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c1
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c41
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c1
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c6
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c90
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c359
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c1
-rw-r--r--drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c406
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c2
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c390
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c16
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c19
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h4
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c11
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c25
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c6
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atom.c28
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c28
-rw-r--r--drivers/gpu/drm/radeon/cik.c2
-rw-r--r--drivers/gpu/drm/radeon/clearstate_si.h3
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/r100.c8
-rw-r--r--drivers/gpu/drm/radeon/r300.c8
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h13
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c4
-rw-r--r--drivers/gpu/drm/radeon/rs400.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv740_dpm.c8
-rw-r--r--drivers/gpu/drm/radeon/rv770.c33
-rw-r--r--drivers/gpu/drm/radeon/rv770_smc.c36
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h51
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c6
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c17
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c8
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c58
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c6
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c6
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c11
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c7
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c6
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c6
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c11
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c18
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c43
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c40
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c6
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c203
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.h3
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c4
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c3
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c5
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c8
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c5
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c5
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c6
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c5
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c5
-rw-r--r--drivers/gpu/drm/stm/drv.c9
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c6
-rw-r--r--drivers/gpu/drm/stm/ltdc.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c8
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/dc.c3
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c8
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c11
-rw-r--r--drivers/gpu/drm/tegra/gem.c2
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c19
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c18
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c51
-rw-r--r--drivers/gpu/drm/tegra/hub.c2
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c21
-rw-r--r--drivers/gpu/drm/tegra/sor.c7
-rw-r--r--drivers/gpu/drm/tegra/vic.c19
-rw-r--r--drivers/gpu/drm/tests/Makefile3
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c8
-rw-r--r--drivers/gpu/drm/tests/drm_exec_test.c213
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c4
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c141
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c8
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c59
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h2
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c9
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c140
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.h5
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c6
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c4
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c6
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c3
-rw-r--r--drivers/gpu/drm/tiny/repaper.c2
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c6
-rw-r--r--drivers/gpu/drm/ttm/Makefile1
-rw-r--r--drivers/gpu/drm/ttm/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/ttm/tests/Makefile6
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_device_test.c212
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c113
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h41
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c437
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c34
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c6
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c8
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c13
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c9
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c12
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c115
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_submit.c256
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c105
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c12
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c20
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h17
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c153
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h2
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h16
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c14
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c10
-rw-r--r--drivers/gpu/host1x/bus.c29
-rw-r--r--drivers/gpu/host1x/context.c10
-rw-r--r--drivers/gpu/host1x/dev.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c5
1459 files changed, 84007 insertions, 20999 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index afb3b2f5f425..ab9ef1c20349 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,9 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select HDMI
select I2C
select DMA_SHARED_BUFFER
@@ -80,6 +83,7 @@ config DRM_KUNIT_TEST
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
+ select DRM_EXEC
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -95,7 +99,6 @@ config DRM_KUNIT_TEST
config DRM_KMS_HELPER
tristate
depends on DRM
- select FB_SYS_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.
@@ -131,9 +134,7 @@ config DRM_DEBUG_MODESET_LOCK
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
- depends on DRM_KMS_HELPER
- depends on FB=y || FB=DRM_KMS_HELPER
- select FRAMEBUFFER_CONSOLE if !EXPERT
+ depends on DRM
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y
help
@@ -194,6 +195,27 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU
+ select DRM_TTM
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers.
+
+ If in doubt, say "N".
+
+config DRM_EXEC
+ tristate
+ depends on DRM
+ help
+ Execution context for command submissions
+
config DRM_BUDDY
tristate
depends on DRM
@@ -216,7 +238,7 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
- select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
@@ -323,6 +345,8 @@ source "drivers/gpu/drm/v3d/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
+source "drivers/gpu/drm/loongson/Kconfig"
+
source "drivers/gpu/drm/etnaviv/Kconfig"
source "drivers/gpu/drm/hisilicon/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 982d9e06168a..215e78e79125 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -45,6 +45,7 @@ drm-y := \
drm_vblank.o \
drm_vblank_work.o \
drm_vma_manager.o \
+ drm_gpuva_mgr.o \
drm_writeback.o
drm-$(CONFIG_DRM_LEGACY) += \
drm_agpsupport.o \
@@ -78,6 +79,8 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
#
# Memory-management helpers
#
+#
+obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
@@ -140,6 +143,7 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
+obj-$(CONFIG_DRM_AMDGPU)+= amd/amdxcp/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
@@ -193,3 +197,4 @@ obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
+obj-$(CONFIG_DRM_LOONGSON) += loongson/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 12adca8c7819..22d88f8ef527 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -21,6 +21,7 @@ config DRM_AMDGPU
select INTERVAL_TREE
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
+ select DRM_EXEC
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
@@ -69,6 +70,16 @@ config DRM_AMDGPU_USERPTR
This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
isn't already selected to enabled full userptr support.
+config DRM_AMDGPU_WERROR
+ bool "Force the compiler to throw an error instead of a warning when compiling"
+ depends on DRM_AMDGPU
+ depends on EXPERT
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for amdgpu.ko.
+	  Only enable this if you are working on amdgpu.ko code and want warnings treated as errors.
+
source "drivers/gpu/drm/amd/acp/Kconfig"
source "drivers/gpu/drm/amd/display/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 415a7fa395c4..384b798a9bad 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -39,10 +39,30 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-I$(FULL_AMD_PATH)/amdkfd
+subdir-ccflags-y := -Wextra
+subdir-ccflags-y += -Wunused
+subdir-ccflags-y += -Wmissing-prototypes
+subdir-ccflags-y += -Wmissing-declarations
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-format-attribute
+# Need this to avoid recursive variable evaluation issues
+cond-flags := $(call cc-option, -Wunused-but-set-variable) \
+ $(call cc-option, -Wunused-const-variable) \
+ $(call cc-option, -Wstringop-truncation) \
+ $(call cc-option, -Wpacked-not-aligned)
+subdir-ccflags-y += $(cond-flags)
+subdir-ccflags-y += -Wno-unused-parameter
+subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-sign-compare
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-override-init
+subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
+
amdgpu-y := amdgpu_drv.o
# add KMS driver
-amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
@@ -60,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o
+ amdgpu_ring_mux.o amdgpu_xcp.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -78,7 +98,7 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o
+ nbio_v7_9.o aqua_vanjaram.o
# add DF block
amdgpu-y += \
@@ -109,7 +129,8 @@ amdgpu-y += \
vega10_ih.o \
vega20_ih.o \
navi10_ih.o \
- ih_v6_0.o
+ ih_v6_0.o \
+ ih_v6_1.o
# add PSP block
amdgpu-y += \
@@ -183,12 +204,14 @@ amdgpu-y += \
vcn_v2_5.o \
vcn_v3_0.o \
vcn_v4_0.o \
+ vcn_v4_0_3.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
jpeg_v2_5.o \
jpeg_v3_0.o \
- jpeg_v4_0.o
+ jpeg_v4_0.o \
+ jpeg_v4_0_3.o
# add ATHUB block
amdgpu-y += \
@@ -203,6 +226,7 @@ amdgpu-y += \
smuio_v11_0.o \
smuio_v11_0_6.o \
smuio_v13_0.o \
+ smuio_v13_0_3.o \
smuio_v13_0_6.o
# add reset block
@@ -228,6 +252,7 @@ amdgpu-y += \
amdgpu_amdkfd_gfx_v9.o \
amdgpu_amdkfd_arcturus.o \
amdgpu_amdkfd_aldebaran.o \
+ amdgpu_amdkfd_gc_9_4_3.o \
amdgpu_amdkfd_gfx_v10.o \
amdgpu_amdkfd_gfx_v10_3.o \
amdgpu_amdkfd_gfx_v11.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 02b827785e39..dc2d53081e80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,7 +53,6 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
@@ -107,8 +106,9 @@
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xcp.h"
-#define MAX_GPU_INSTANCE 16
+#define MAX_GPU_INSTANCE 64
struct amdgpu_gpu_instance
{
@@ -192,7 +192,6 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
-extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
@@ -212,6 +211,8 @@ extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
+extern int amdgpu_mtype_local;
+extern bool enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -242,9 +243,10 @@ extern int amdgpu_num_kcq;
extern int amdgpu_vcnfw_log;
extern int amdgpu_sg_display;
+extern int amdgpu_user_partt_mode;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
-#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
@@ -282,6 +284,10 @@ extern int amdgpu_sg_display;
#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
+/* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
+#define AMDGPU_SWCTF_EXTRA_DELAY 50
+
+struct amdgpu_xcp_mgr;
struct amdgpu_device;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
@@ -463,6 +469,8 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ /** GPU partition selection */
+ uint32_t xcp_id;
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
@@ -573,6 +581,8 @@ struct amdgpu_asic_funcs {
/* query video codecs */
int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs);
+ /* encode "> 32bits" smn addressing */
+ u64 (*encode_ext_smn_addressing)(int ext_id);
};
/*
@@ -607,6 +617,9 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
+typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
+typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
+
typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
@@ -657,7 +670,7 @@ enum amd_hw_ip_block_type {
MAX_HWIP
};
-#define HWIP_MAX_INSTANCE 28
+#define HWIP_MAX_INSTANCE 44
#define HW_ID_MAX 300
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
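
For reference, the IP_VERSION() packing above (together with the IP_VERSION_MIN/REV unpack macros kept in the next hunk) works out as follows; the 9.4.3 value is only an illustrative example, matching the GC 9.4.3 support added elsewhere in this series.

u32 ver   = IP_VERSION(9, 4, 3);   /* (9 << 16) | (4 << 8) | 3 == 0x090403 */
u32 minor = IP_VERSION_MIN(ver);   /* (0x090403 >> 8) & 0xFF == 4 */
u32 rev   = IP_VERSION_REV(ver);   /* 0x090403 & 0xFF == 3 */
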
@@ -665,6 +678,17 @@ enum amd_hw_ip_block_type {
#define IP_VERSION_MIN(ver) (((ver) >> 8) & 0xFF)
#define IP_VERSION_REV(ver) ((ver) & 0xFF)
+struct amdgpu_ip_map_info {
+ /* Map of logical to actual dev instances/mask */
+ uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
+ int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst);
+ uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask);
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -750,6 +774,7 @@ struct amdgpu_device {
struct amdgpu_acp acp;
#endif
struct amdgpu_hive_info *hive;
+ struct amdgpu_xcp_mgr *xcp_mgr;
/* ASIC */
enum amd_asic_type asic_type;
uint32_t family;
@@ -797,6 +822,8 @@ struct amdgpu_device {
amdgpu_wreg_t pcie_wreg;
amdgpu_rreg_t pciep_rreg;
amdgpu_wreg_t pciep_wreg;
+ amdgpu_rreg_ext_t pcie_rreg_ext;
+ amdgpu_wreg_ext_t pcie_wreg_ext;
amdgpu_rreg64_t pcie_rreg64;
amdgpu_wreg64_t pcie_wreg64;
/* protects concurrent UVD register access */
@@ -830,7 +857,7 @@ struct amdgpu_device {
dma_addr_t dummy_page_addr;
struct amdgpu_vm_manager vm_manager;
struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
- unsigned num_vmhubs;
+ DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
/* memory management */
struct amdgpu_mman mman;
@@ -962,6 +989,7 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+ struct amdgpu_ip_map_info ip_map;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -1004,7 +1032,6 @@ struct amdgpu_device {
bool has_pr3;
bool ucode_sysfs_en;
- bool psp_sysfs_en;
/* Chip product information */
char product_number[20];
@@ -1020,6 +1047,9 @@ struct amdgpu_device {
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ /* Track auto wait count on s_barrier settings */
+ bool barrier_has_auto_waitcnt;
+
struct amdgpu_reset_control *reset_cntl;
uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -1050,6 +1080,8 @@ struct amdgpu_device {
bool job_hang;
bool dc_enabled;
+ /* Mask of active clusters */
+ uint32_t aid_mask;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1081,13 +1113,20 @@ size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write);
+uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
+ uint32_t inst, uint32_t reg_addr, char reg_name[],
+ uint32_t expected_value, uint32_t mask);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t acc_flags);
+u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr);
void amdgpu_device_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr, u32 reg_data);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
- uint32_t reg, uint32_t v);
+ uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@@ -1137,6 +1176,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
+#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
+#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
@@ -1204,7 +1245,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
/*
* ASICs macro.
*/
-#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
+#define amdgpu_asic_set_vga_state(adev, state) \
+ ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
@@ -1235,6 +1277,11 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
+#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
+#define for_each_inst(i, inst_mask) \
+ for (i = ffs(inst_mask); i-- != 0; \
+ i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
+
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
/* Common functions */
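
The for_each_inst() iterator added above walks the set bits of an instance mask in ascending order (ffs() is 1-based, hence the post-decrement, and BIT_MASK_UPPER() masks off the bits already visited). A small usage sketch, with a made-up mask value:

int i;
uint32_t inst_mask = 0x15;	/* bits 0, 2 and 4 set -- an illustrative value */

for_each_inst(i, inst_mask)
	pr_debug("programming instance %d\n", i);	/* visits 0, then 2, then 4 */
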
@@ -1246,6 +1293,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
+bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
@@ -1348,6 +1397,12 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
/* amdgpu_acpi.c */
+struct amdgpu_numa_info {
+ uint64_t size;
+ int pxm;
+ int nid;
+};
+
/* ATCS Device/Driver State */
#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
@@ -1365,15 +1420,32 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
+ u64 *tmr_size);
+int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
+ struct amdgpu_numa_info *numa_info);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
+void amdgpu_acpi_release(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
+static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
+ u64 *tmr_offset, u64 *tmr_size)
+{
+ return -EINVAL;
+}
+static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
+ int xcc_id,
+ struct amdgpu_numa_info *numa_info)
+{
+ return -EINVAL;
+}
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
+static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
@@ -1433,4 +1505,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
int amdgpu_in_reset(struct amdgpu_device *adev);
+extern const struct attribute_group amdgpu_vram_mgr_attr_group;
+extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+extern const struct attribute_group amdgpu_flash_attr_group;
+
#endif
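
The header changes above also wire up 64-bit "extended" indirect register access (the pcie_rreg_ext/pcie_wreg_ext callbacks plus the RREG32_PCIE_EXT/WREG32_PCIE_EXT wrappers) for SMN addresses that no longer fit in 32 bits. A hedged sketch of a caller; the register offset is a placeholder, not a real address, and the wrappers expand to the adev callbacks, so adev must be in scope.

static void example_rmw_ext(struct amdgpu_device *adev)
{
	u64 reg = 0x1234567890ULL;	/* placeholder address above the 32-bit range */
	u32 val;

	val = RREG32_PCIE_EXT(reg);
	WREG32_PCIE_EXT(reg, val | BIT(0));
}
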
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index aeeec211861c..2bca37044ad0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/slab.h>
+#include <linux/xarray.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
@@ -38,6 +39,45 @@
#include "amd_acpi.h"
#include "atom.h"
+/* Declare GUID for AMD _DSM method for XCCs */
+static const guid_t amd_xcc_dsm_guid = GUID_INIT(0x8267f5d5, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+#define AMD_XCC_HID_START 3000
+#define AMD_XCC_DSM_GET_NUM_FUNCS 0
+#define AMD_XCC_DSM_GET_SUPP_MODE 1
+#define AMD_XCC_DSM_GET_XCP_MODE 2
+#define AMD_XCC_DSM_GET_VF_XCC_MAPPING 4
+#define AMD_XCC_DSM_GET_TMR_INFO 5
+#define AMD_XCC_DSM_NUM_FUNCS 5
+
+#define AMD_XCC_MAX_HID 24
+
+struct xarray numa_info_xa;
+
+/* Encapsulates the XCD acpi object information */
+struct amdgpu_acpi_xcc_info {
+ struct list_head list;
+ struct amdgpu_numa_info *numa_info;
+ uint8_t xcp_node;
+ uint8_t phy_id;
+ acpi_handle handle;
+};
+
+struct amdgpu_acpi_dev_info {
+ struct list_head list;
+ struct list_head xcc_list;
+ uint16_t bdf;
+ uint16_t supp_xcp_mode;
+ uint16_t xcp_mode;
+ uint16_t mem_mode;
+ uint64_t tmr_base;
+ uint64_t tmr_size;
+};
+
+struct list_head amdgpu_acpi_dev_list;
+
struct amdgpu_atif_notification_cfg {
bool enabled;
int command_code;
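
The constants above drive the enumeration loop added later in this file: amdgpu_acpi_enumerate_xcc() probes ACPI device IDs built from AMD_XCC_HID_START, i.e. "AMD3000" through "AMD3023" (AMD_XCC_MAX_HID of them), and stops at the first gap. A worked sketch of the HID construction, mirroring that loop:

char hid[ACPI_ID_LEN];
int id;

for (id = 0; id < AMD_XCC_MAX_HID; id++) {
	sprintf(hid, "%s%d", "AMD", AMD_XCC_HID_START + id);
	/* id == 0 -> "AMD3000", id == 23 -> "AMD3023" */
}
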
@@ -666,7 +706,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
atcs_input.size = sizeof(struct atcs_pref_req_input);
/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
- atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+ atcs_input.client_id = pci_dev_id(adev->pdev);
atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
if (advertise)
@@ -736,7 +776,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
atcs_input.size = sizeof(struct atcs_pwr_shift_input);
/* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
- atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+ atcs_input.dgpu_id = pci_dev_id(adev->pdev);
atcs_input.dev_acpi_state = dev_state;
atcs_input.drv_state = drv_state;
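
Both ATCS hunks above replace the open-coded bus/devfn packing with pci_dev_id(). The helper produces the same 16-bit BDF the comments describe (bits 15:8 bus number, 7:3 device, 2:0 function); a short sketch of the equivalence:

#include <linux/pci.h>

/* pci_dev_id(pdev) is PCI_DEVID(pdev->bus->number, pdev->devfn): the bus
 * number lands in bits 15:8 and devfn in bits 7:0 -- exactly the value the
 * removed expression computed by hand. */
static u16 example_client_id(struct pci_dev *pdev)
{
	return pci_dev_id(pdev);	/* == pdev->devfn | (pdev->bus->number << 8) */
}
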
@@ -801,6 +841,343 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
return r;
}
+#ifdef CONFIG_ACPI_NUMA
+static inline uint64_t amdgpu_acpi_get_numa_size(int nid)
+{
+ /* This is directly using si_meminfo_node implementation as the
+ * function is not exported.
+ */
+ int zone_type;
+ uint64_t managed_pages = 0;
+
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ managed_pages +=
+ zone_managed_pages(&pgdat->node_zones[zone_type]);
+ return managed_pages * PAGE_SIZE;
+}
+
+static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
+{
+ struct amdgpu_numa_info *numa_info;
+ int nid;
+
+ numa_info = xa_load(&numa_info_xa, pxm);
+
+ if (!numa_info) {
+ struct sysinfo info;
+
+ numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL);
+ if (!numa_info)
+ return NULL;
+
+ nid = pxm_to_node(pxm);
+ numa_info->pxm = pxm;
+ numa_info->nid = nid;
+
+ if (numa_info->nid == NUMA_NO_NODE) {
+ si_meminfo(&info);
+ numa_info->size = info.totalram * info.mem_unit;
+ } else {
+ numa_info->size = amdgpu_acpi_get_numa_size(nid);
+ }
+ xa_store(&numa_info_xa, numa_info->pxm, numa_info, GFP_KERNEL);
+ }
+
+ return numa_info;
+}
+#endif
+
+/**
+ * amdgpu_acpi_get_node_id - obtain the NUMA node id for the corresponding
+ * amdgpu ACPI device handle
+ *
+ * @handle: acpi handle
+ * @numa_info: amdgpu_numa_info structure holding numa information
+ *
+ * Queries the ACPI interface to fetch the corresponding NUMA Node ID for a
+ * given amdgpu acpi device.
+ *
+ * Returns AE_OK with the NUMA information filled in on success, or the corresponding ACPI failure status
+ */
+static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle,
+ struct amdgpu_numa_info **numa_info)
+{
+#ifdef CONFIG_ACPI_NUMA
+ u64 pxm;
+ acpi_status status;
+
+ if (!numa_info)
+ return_ACPI_STATUS(AE_ERROR);
+
+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ *numa_info = amdgpu_acpi_get_numa_info(pxm);
+
+ if (!*numa_info)
+ return_ACPI_STATUS(AE_ERROR);
+
+ return_ACPI_STATUS(AE_OK);
+#else
+ return_ACPI_STATUS(AE_NOT_EXIST);
+#endif
+}
+
+static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
+{
+ struct amdgpu_acpi_dev_info *acpi_dev;
+
+ if (list_empty(&amdgpu_acpi_dev_list))
+ return NULL;
+
+ list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list)
+ if (acpi_dev->bdf == bdf)
+ return acpi_dev;
+
+ return NULL;
+}
+
+static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
+ struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf)
+{
+ struct amdgpu_acpi_dev_info *tmp;
+ union acpi_object *obj;
+ int ret = -ENOENT;
+
+ *dev_info = NULL;
+ tmp = kzalloc(sizeof(struct amdgpu_acpi_dev_info), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&tmp->xcc_list);
+ INIT_LIST_HEAD(&tmp->list);
+ tmp->bdf = bdf;
+
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_SUPP_MODE, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_SUPP_MODE);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ tmp->supp_xcp_mode = obj->integer.value & 0xFFFF;
+ ACPI_FREE(obj);
+
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_XCP_MODE, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_XCP_MODE);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ tmp->xcp_mode = obj->integer.value & 0xFFFF;
+ tmp->mem_mode = (obj->integer.value >> 32) & 0xFFFF;
+ ACPI_FREE(obj);
+
+ /* Evaluate DSMs and fill XCC information */
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_TMR_INFO, NULL,
+ ACPI_TYPE_PACKAGE);
+
+ if (!obj || obj->package.count < 2) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_TMR_INFO);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ tmp->tmr_base = obj->package.elements[0].integer.value;
+ tmp->tmr_size = obj->package.elements[1].integer.value;
+ ACPI_FREE(obj);
+
+ DRM_DEBUG_DRIVER(
+ "New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ",
+ tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
+ tmp->tmr_base, tmp->tmr_size);
+ list_add_tail(&tmp->list, &amdgpu_acpi_dev_list);
+ *dev_info = tmp;
+
+ return 0;
+
+out:
+ if (obj)
+ ACPI_FREE(obj);
+ kfree(tmp);
+
+ return ret;
+}
+
+static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
+ u16 *bdf)
+{
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = -ENOENT;
+
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_NUM_FUNCS, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj || obj->integer.value != AMD_XCC_DSM_NUM_FUNCS)
+ goto out;
+ ACPI_FREE(obj);
+
+ /* Evaluate DSMs and fill XCC information */
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_VF_XCC_MAPPING, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_VF_XCC_MAPPING);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* PF xcc id [39:32] */
+ xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF;
+ /* xcp node of this xcc [47:40] */
+ xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF;
+ /* PF bus/dev/fn of this xcc [63:48] */
+ *bdf = (obj->integer.value >> 48) & 0xFFFF;
+ ACPI_FREE(obj);
+ obj = NULL;
+
+ status =
+ amdgpu_acpi_get_node_id(xcc_info->handle, &xcc_info->numa_info);
+
+ /* TODO: check if this check is required */
+ if (ACPI_SUCCESS(status))
+ ret = 0;
+out:
+ if (obj)
+ ACPI_FREE(obj);
+
+ return ret;
+}
+
+static int amdgpu_acpi_enumerate_xcc(void)
+{
+ struct amdgpu_acpi_dev_info *dev_info = NULL;
+ struct amdgpu_acpi_xcc_info *xcc_info;
+ struct acpi_device *acpi_dev;
+ char hid[ACPI_ID_LEN];
+ int ret, id;
+ u16 bdf;
+
+ INIT_LIST_HEAD(&amdgpu_acpi_dev_list);
+ xa_init(&numa_info_xa);
+
+ for (id = 0; id < AMD_XCC_MAX_HID; id++) {
+ sprintf(hid, "%s%d", "AMD", AMD_XCC_HID_START + id);
+ acpi_dev = acpi_dev_get_first_match_dev(hid, NULL, -1);
+ /* These ACPI objects are expected to be in sequential order. If
+ * one is not found, no need to check the rest.
+ */
+ if (!acpi_dev) {
+ DRM_DEBUG_DRIVER("No matching acpi device found for %s",
+ hid);
+ break;
+ }
+
+ xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info),
+ GFP_KERNEL);
+ if (!xcc_info) {
+ DRM_ERROR("Failed to allocate memory for xcc info\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&xcc_info->list);
+ xcc_info->handle = acpi_device_handle(acpi_dev);
+ acpi_dev_put(acpi_dev);
+
+ ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf);
+ if (ret) {
+ kfree(xcc_info);
+ continue;
+ }
+
+ dev_info = amdgpu_acpi_get_dev(bdf);
+
+ if (!dev_info)
+ ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf);
+
+ if (ret == -ENOMEM)
+ return ret;
+
+ if (!dev_info) {
+ kfree(xcc_info);
+ continue;
+ }
+
+ list_add_tail(&xcc_info->list, &dev_info->xcc_list);
+ }
+
+ return 0;
+}
+
+int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
+ u64 *tmr_size)
+{
+ struct amdgpu_acpi_dev_info *dev_info;
+ u16 bdf;
+
+ if (!tmr_offset || !tmr_size)
+ return -EINVAL;
+
+ bdf = pci_dev_id(adev->pdev);
+ dev_info = amdgpu_acpi_get_dev(bdf);
+ if (!dev_info)
+ return -ENOENT;
+
+ *tmr_offset = dev_info->tmr_base;
+ *tmr_size = dev_info->tmr_size;
+
+ return 0;
+}
+
+int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
+ struct amdgpu_numa_info *numa_info)
+{
+ struct amdgpu_acpi_dev_info *dev_info;
+ struct amdgpu_acpi_xcc_info *xcc_info;
+ u16 bdf;
+
+ if (!numa_info)
+ return -EINVAL;
+
+ bdf = pci_dev_id(adev->pdev);
+ dev_info = amdgpu_acpi_get_dev(bdf);
+ if (!dev_info)
+ return -ENOENT;
+
+ list_for_each_entry(xcc_info, &dev_info->xcc_list, list) {
+ if (xcc_info->phy_id == xcc_id) {
+ memcpy(numa_info, xcc_info->numa_info,
+ sizeof(*numa_info));
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
/**
* amdgpu_acpi_event - handle notify events
*
@@ -1054,6 +1431,36 @@ void amdgpu_acpi_detect(void)
} else {
atif->backlight_caps.caps_valid = false;
}
+
+ amdgpu_acpi_enumerate_xcc();
+}
+
+void amdgpu_acpi_release(void)
+{
+ struct amdgpu_acpi_dev_info *dev_info, *dev_tmp;
+ struct amdgpu_acpi_xcc_info *xcc_info, *xcc_tmp;
+ struct amdgpu_numa_info *numa_info;
+ unsigned long index;
+
+ xa_for_each(&numa_info_xa, index, numa_info) {
+ kfree(numa_info);
+ xa_erase(&numa_info_xa, index);
+ }
+
+ if (list_empty(&amdgpu_acpi_dev_list))
+ return;
+
+ list_for_each_entry_safe(dev_info, dev_tmp, &amdgpu_acpi_dev_list,
+ list) {
+ list_for_each_entry_safe(xcc_info, xcc_tmp, &dev_info->xcc_list,
+ list) {
+ list_del(&xcc_info->list);
+ kfree(xcc_info);
+ }
+
+ list_del(&dev_info->list);
+ kfree(dev_info);
+ }
}
#if IS_ENABLED(CONFIG_SUSPEND)
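
amdgpu_acpi_release() above undoes what amdgpu_acpi_detect()/amdgpu_acpi_enumerate_xcc() build up: the cached amdgpu_numa_info entries in the xarray and the per-device XCC lists. The actual call site is not part of this hunk; the sketch below only illustrates the expected module init/exit pairing, and the wrapper names are assumptions.

static int __init example_init(void)
{
	amdgpu_acpi_detect();		/* builds the ACPI dev/XCC/NUMA caches */
	return 0;
}

static void __exit example_exit(void)
{
	amdgpu_acpi_release();		/* frees the xarray entries and dev/XCC lists */
}
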
@@ -1092,16 +1499,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
* S0ix even though the system is suspending to idle, so return false
* in that case.
*/
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- dev_warn_once(adev->dev,
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_err_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
+ }
#if !IS_ENABLED(CONFIG_AMD_PMC)
- dev_warn_once(adev->dev,
+ dev_err_once(adev->dev,
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
-#endif /* CONFIG_AMD_PMC */
+ return false;
+#else
return true;
+#endif /* CONFIG_AMD_PMC */
}
#endif /* CONFIG_SUSPEND */
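
The net effect of the amdgpu_acpi.c additions is a pair of query helpers, amdgpu_acpi_get_tmr_info() and amdgpu_acpi_get_mem_info(), declared in the amdgpu.h hunk earlier. A short consumer sketch, assuming the helpers succeed and using xcc_id 0 purely as an example:

static void example_query(struct amdgpu_device *adev)
{
	struct amdgpu_numa_info numa;
	u64 tmr_offset, tmr_size;

	if (!amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size))
		dev_dbg(adev->dev, "TMR base 0x%llx size 0x%llx\n",
			tmr_offset, tmr_size);

	if (!amdgpu_acpi_get_mem_info(adev, 0, &numa))
		dev_dbg(adev->dev, "XCC 0: NUMA node %d, %llu bytes\n",
			numa.nid, numa.size);
}
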
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 0385f7f69278..df633e9ce920 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -53,7 +53,6 @@ int amdgpu_amdkfd_init(void)
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
ret = kgd2kfd_init();
- amdgpu_amdkfd_gpuvm_init_mem_limits();
kfd_initialized = !ret;
return ret;
@@ -143,6 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
int i;
int last_valid_bit;
+ amdgpu_amdkfd_gpuvm_init_mem_limits();
+
if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap =
@@ -162,7 +163,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* clear
*/
bitmap_complement(gpu_resources.cp_queue_bitmap,
- adev->gfx.mec.queue_bitmap,
+ adev->gfx.mec_bitmap[0].queue_bitmap,
KGD_MAX_QUEUES);
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
@@ -225,16 +226,6 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
-int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
-{
- int r = 0;
-
- if (adev->kfd.dev)
- r = kgd2kfd_resume_iommu(adev->kfd.dev);
-
- return r;
-}
-
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
@@ -427,14 +418,23 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
}
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
- struct kfd_local_mem_info *mem_info)
+ struct kfd_local_mem_info *mem_info,
+ struct amdgpu_xcp *xcp)
{
memset(mem_info, 0, sizeof(*mem_info));
- mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
- mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+ if (xcp) {
+ if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
+ mem_info->local_mem_size_public =
+ KFD_XCP_MEMORY_SIZE(adev, xcp->id);
+ else
+ mem_info->local_mem_size_private =
+ KFD_XCP_MEMORY_SIZE(adev, xcp->id);
+ } else {
+ mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+ mem_info->local_mem_size_private = adev->gmc.real_vram_size -
adev->gmc.visible_vram_size;
-
+ }
mem_info->vram_width = adev->gmc.vram_width;
pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
@@ -497,7 +497,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
struct amdgpu_device **dmabuf_adev,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
- uint32_t *flags)
+ uint32_t *flags, int8_t *xcp_id)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -541,6 +541,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
}
+ if (xcp_id)
+ *xcp_id = bo->xcp_id;
out_put:
dma_buf_put(dma_buf);
@@ -732,17 +734,19 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
if (adev->family == AMDGPU_FAMILY_AI) {
int i;
- for (i = 0; i < adev->num_vmhubs; i++)
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
} else {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);
}
return 0;
}
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
- uint16_t pasid, enum TLB_FLUSH_TYPE flush_type)
+ uint16_t pasid,
+ enum TLB_FLUSH_TYPE flush_type,
+ uint32_t inst)
{
bool all_hub = false;
@@ -750,7 +754,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
adev->family == AMDGPU_FAMILY_RV)
all_hub = true;
- return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
+ return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
}
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
@@ -758,11 +762,32 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
return adev->have_atomics_support;
}
+void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
+{
+ amdgpu_device_flush_hdp(adev, NULL);
+}
+
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
amdgpu_umc_poison_handler(adev, reset);
}
+int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
+ uint32_t *payload)
+{
+ int ret;
+
+ /* Device or IH ring is not ready so bail. */
+ ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
+ if (ret)
+ return ret;
+
+ /* Send payload to fence KFD interrupts */
+ amdgpu_amdkfd_interrupt(adev, payload);
+
+ return 0;
+}
+
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
{
if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
@@ -770,3 +795,78 @@ bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
else
return false;
}
+
+int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
+{
+ return kgd2kfd_check_and_lock_kfd();
+}
+
+void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
+{
+ kgd2kfd_unlock_kfd();
+}
+
+u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
+{
+ u64 tmp;
+ s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
+
+ if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
+ tmp = adev->gmc.mem_partitions[mem_id].size;
+ do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
+ return ALIGN_DOWN(tmp, PAGE_SIZE);
+ } else {
+ return adev->gmc.real_vram_size;
+ }
+}
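For reference, the per-XCP size computed above is just the memory-partition size split evenly across the XCPs that share it, rounded down to a page boundary. A minimal standalone sketch of that arithmetic (illustrative C outside the driver; the helper name, the example sizes and the 4 KiB page size are assumptions, not driver API):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ULL

/* Hypothetical mirror of the per-XCP size calculation above. */
static uint64_t example_xcp_memory_size(uint64_t mem_partition_size,
					unsigned int num_xcp_per_partition)
{
	uint64_t size = mem_partition_size / num_xcp_per_partition;

	/* ALIGN_DOWN(size, PAGE_SIZE) */
	return size & ~(EXAMPLE_PAGE_SIZE - 1);
}

int main(void)
{
	/* e.g. a 24 GiB memory partition shared by 3 XCPs -> 8 GiB each */
	printf("%llu bytes\n", (unsigned long long)
	       example_xcp_memory_size(24ULL << 30, 3));
	return 0;
}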
+
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+ u32 inst)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ struct amdgpu_ring_funcs *ring_funcs;
+ struct amdgpu_ring *ring;
+ int r = 0;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
+ if (!ring_funcs)
+ return -ENOMEM;
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ r = -ENOMEM;
+ goto free_ring_funcs;
+ }
+
+ ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
+ ring->doorbell_index = doorbell_off;
+ ring->funcs = ring_funcs;
+
+ spin_lock(&kiq->ring_lock);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock(&kiq->ring_lock);
+ r = -ENOMEM;
+ goto free_ring;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
+
+ if (kiq_ring->sched.ready && !adev->job_hang)
+ r = amdgpu_ring_test_helper(kiq_ring);
+
+ spin_unlock(&kiq->ring_lock);
+
+free_ring:
+ kfree(ring);
+
+free_ring_funcs:
+ kfree(ring_funcs);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 01ba3589b60a..2fe9860725bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -25,15 +25,17 @@
#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED
+#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
+#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
+#include "amdgpu_xcp.h"
extern uint64_t amdgpu_amdkfd_total_mem_size;
@@ -69,8 +71,7 @@ struct kgd_mem {
struct hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
- struct ttm_validate_buffer validate_list;
- struct ttm_validate_buffer resv_list;
+ struct list_head validate_list;
uint32_t domain;
unsigned int mapped_to_gpu_memory;
uint64_t va;
@@ -97,10 +98,13 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
- int64_t vram_used;
- uint64_t vram_used_aligned;
+ int64_t vram_used[MAX_XCP];
+ uint64_t vram_used_aligned[MAX_XCP];
bool init_complete;
struct work_struct reset_work;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
@@ -144,13 +148,14 @@ int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
+int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
+void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
@@ -160,7 +165,8 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
- uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
+ uint16_t pasid, enum TLB_FLUSH_TYPE flush_type,
+ uint32_t inst);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
@@ -224,7 +230,8 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
- struct kfd_local_mem_info *mem_info);
+ struct kfd_local_mem_info *mem_info,
+ struct amdgpu_xcp *xcp);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
@@ -234,13 +241,17 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
struct amdgpu_device **dmabuf_adev,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
- uint32_t *flags);
+ uint32_t *flags, int8_t *xcp_id);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
struct amdgpu_device *src,
bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
+int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
+ uint32_t *payload);
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+ u32 inst);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -279,7 +290,8 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
-size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev);
+size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
+ uint8_t xcp_id);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
@@ -310,6 +322,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
struct dma_buf **dmabuf);
+void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
@@ -319,9 +332,18 @@ void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 alloc_flag);
+ uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 alloc_flag);
+ uint64_t size, u32 alloc_flag, int8_t xcp_id);
+
+u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);
+
+#define KFD_XCP_MEM_ID(adev, xcp_id) \
+ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\
+ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)
+
+#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
+
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
@@ -352,6 +374,17 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif
+
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
+#else
+static inline
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
+{
+ return 0;
+}
+#endif
+
/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
@@ -365,13 +398,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
+int kgd2kfd_check_and_lock_kfd(void);
+void kgd2kfd_unlock_kfd(void);
#else
static inline int kgd2kfd_init(void)
{
@@ -403,11 +437,6 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
-static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
-{
- return 0;
-}
-
static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
@@ -437,5 +466,14 @@ static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
+
+static inline int kgd2kfd_check_and_lock_kfd(void)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_unlock_kfd(void)
+{
+}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 4485bb29bec9..aff08321e976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -23,6 +23,145 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_amdkfd_arcturus.h"
#include "amdgpu_amdkfd_gfx_v9.h"
+#include "amdgpu_amdkfd_aldebaran.h"
+#include "gc/gc_9_4_2_offset.h"
+#include "gc/gc_9_4_2_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+/*
+ * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
+ *
+ * restore_dbg_registers is ignored here but is a general interface requirement
+ * for devices that support GFXOFF and where the RLC save/restore list
+ * does not support hw registers for debugging i.e. the driver has to manually
+ * initialize the debug mode registers after it has disabled GFX off during the
+ * debug session.
+ */
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_aldebaran_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+static int kgd_aldebaran_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = REG_GET_FIELD(kfd_dbg_trap_cntl_prev, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+ trap_mask_bits = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, trap_mask_bits);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
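The EXCP_EN update above is a plain masked read-modify-write: bits selected by trap_mask_request take their newly requested values, while all other bits keep the previous value. A small self-contained illustration (plain C with made-up mask values, not driver code):

#include <assert.h>
#include <stdint.h>

/* Merge requested trap-mask bits into the previous mask, mirroring the
 * trap_mask_bits update in the function above. */
static uint32_t merge_trap_mask(uint32_t new_bits, uint32_t request,
				uint32_t prev)
{
	return (new_bits & request) | (prev & ~request);
}

int main(void)
{
	/* Update only bits 0-3; bit 4 keeps its previous value. */
	assert(merge_trap_mask(0x05, 0x0f, 0x1a) == 0x15);
	return 0;
}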
+
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode);
+
+ return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_aldebaran_set_address_watch(
+ struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ return watch_address_cntl;
+}
const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
@@ -42,5 +181,14 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
- .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
+ .enable_debug_trap = kgd_aldebaran_enable_debug_trap,
+ .disable_debug_trap = kgd_aldebaran_disable_debug_trap,
+ .validate_trap_override_request = kgd_aldebaran_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_aldebaran_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h
new file mode 100644
index 000000000000..a7bdaf8d82dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4191af5a3f13..625db444df1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_amdkfd_arcturus.h"
+#include "amdgpu_reset.h"
#include "sdma0/sdma0_4_2_2_offset.h"
#include "sdma0/sdma0_4_2_2_sh_mask.h"
#include "sdma1/sdma1_4_2_2_offset.h"
@@ -48,6 +49,8 @@
#include "amdgpu_amdkfd_gfx_v9.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v9_4.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
@@ -276,6 +279,117 @@ int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
return 0;
}
+/*
+ * Helper used to suspend/resume the gfx pipe so that barrier behaviour can be
+ * updated for image post-processing work.
+ */
+static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool suspend)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ if (!(ring && ring->sched.thread))
+ continue;
+
+ /* Stop the scheduler and drain the ring. */
+ if (suspend) {
+ drm_sched_stop(&ring->sched, NULL);
+ r = amdgpu_fence_wait_empty(ring);
+ if (r)
+ goto out;
+ } else {
+ drm_sched_start(&ring->sched, false);
+ }
+ }
+
+out:
+ /* return on resume or failure to drain rings. */
+ if (!suspend || r)
+ return r;
+
+ return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
+}
+
+static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_waitcnt)
+{
+ uint32_t data;
+
+ WRITE_ONCE(adev->barrier_has_auto_waitcnt, enable_waitcnt);
+
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return;
+
+ amdgpu_amdkfd_suspend(adev, false);
+
+ if (suspend_resume_compute_scheduler(adev, true))
+ goto out;
+
+ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG));
+ data = REG_SET_FIELD(data, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
+ !enable_waitcnt);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG), data);
+
+out:
+ suspend_resume_compute_scheduler(adev, false);
+
+ amdgpu_amdkfd_resume(adev, false);
+
+ up_read(&adev->reset_domain->sem);
+}
+
+/*
+ * restore_dbg_registers is ignored here but is a general interface requirement
+ * for devices that support GFXOFF and where the RLC save/restore list
+ * does not support hw registers for debugging i.e. the driver has to manually
+ * initialize the debug mode registers after it has disabled GFX off during the
+ * debug session.
+ */
+static uint32_t kgd_arcturus_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ set_barrier_auto_waitcnt(adev, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+/*
+ * keep_trap_enabled is ignored here but is a general interface requirement
+ * for devices that support multi-process debugging where the performance
+ * overhead from trap temporary setup needs to be bypassed when the debug
+ * session has ended.
+ */
+static uint32_t kgd_arcturus_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ set_barrier_auto_waitcnt(adev, false);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -294,6 +408,15 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base =
kgd_gfx_v9_set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_arcturus_enable_debug_trap,
+ .disable_debug_trap = kgd_arcturus_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v9_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
new file mode 100644
index 000000000000..490c8f5ddb60
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+#include "amdgpu_amdkfd_aldebaran.h"
+#include "gc/gc_9_4_3_offset.h"
+#include "gc/gc_9_4_3_sh_mask.h"
+#include "athub/athub_1_8_0_offset.h"
+#include "athub/athub_1_8_0_sh_mask.h"
+#include "oss/osssys_4_4_2_offset.h"
+#include "oss/osssys_4_4_2_sh_mask.h"
+#include "v9_structs.h"
+#include "soc15.h"
+#include "sdma/sdma_4_4_2_offset.h"
+#include "sdma/sdma_4_4_2_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v9_sdma_mqd *)mqd;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base =
+ SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, engine_id),
+ regSDMA_RLC0_RB_CNTL) -
+ regSDMA_RLC0_RB_CNTL;
+ uint32_t retval = sdma_engine_reg_base +
+ queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
+ return retval;
+}
+
+static int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS);
+ if (data & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+12)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = regSDMA_RLC0_RB_CNTL; reg <= regSDMA_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA_RLC0_STATUS; reg <= regSDMA_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA_RLC0_IB_SUB_REMAIN;
+ reg <= regSDMA_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA_RLC0_MIDCMD_DATA0;
+ reg <= regSDMA_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL);
+ temp = temp & ~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL) |
+ SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr =
+ RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev,
+ u32 pasid, unsigned int vmid, uint32_t xcc_inst)
+{
+ unsigned long timeout;
+ unsigned int reg;
+ unsigned int phy_inst = GET_INST(GC, xcc_inst);
+ /* Every two XCCs share one AID */
+ unsigned int aid = phy_inst / 2;
+
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished
+ * and the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ regATC_VMID0_PASID_MAPPING) + vmid, pasid_mapping);
+
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ regATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+ (1U << vmid))) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("Fail to program VMID-PASID mapping\n");
+ return -ETIME;
+ }
+ cpu_relax();
+ }
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ regATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+ 1U << vmid);
+
+ reg = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX));
+ /* Every 4 entries form a cycle: the 1st is the AID, the 2nd and 3rd are
+ * XCDs, and the 4th is reserved. Therefore "aid * 4 + (phy_inst % 2) + 1"
+ * programs the _LUT for the XCC and "aid * 4" for the AID that the XCC
+ * connects to.
+ */
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX),
+ aid * 4 + (phy_inst % 2) + 1);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid,
+ pasid_mapping);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX),
+ aid * 4);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid,
+ pasid_mapping);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), reg);
+
+ return 0;
+}
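Spelled out, the IH_VMID_LUT_INDEX arithmetic above works as follows: with two XCCs per AID, physical GC instance N belongs to AID N/2, its XCC slot in the LUT is aid * 4 + (N % 2) + 1, and the AID slot is aid * 4. A quick standalone check of those indices (illustrative C, instance numbers chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	/* Walk a few hypothetical physical GC instances. */
	for (unsigned int phy_inst = 0; phy_inst < 4; phy_inst++) {
		unsigned int aid = phy_inst / 2;  /* two XCCs share one AID */
		unsigned int xcc_lut_index = aid * 4 + (phy_inst % 2) + 1;
		unsigned int aid_lut_index = aid * 4;

		printf("inst %u: aid %u, XCC LUT index %u, AID LUT index %u\n",
		       phy_inst, aid, xcc_lut_index, aid_lut_index);
	}
	return 0;
}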
+
+static inline struct v9_mqd *get_mqd(void *mqd)
+{
+ return (struct v9_mqd *)mqd;
+}
+
+static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
+{
+ struct v9_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, hqd_base, hqd_end, data;
+
+ m = get_mqd(mqd);
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+
+ /* HQD registers extend to CP_HQD_AQL_DISPATCH_ID_HI */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+ hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_MQD_BASE_ADDR);
+ hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI);
+
+ for (reg = hqd_base; reg <= hqd_end; reg++)
+ WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
+
+
+ /* Activate doorbell logic before triggering WPTR poll. */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL),
+ data);
+
+ if (wptr) {
+ /* Don't read wptr with get_user because the user
+ * context may not be accessible (if this function
+ * runs in a work queue). Instead trigger a one-shot
+ * polling read from memory in the CP. This assumes
+ * that wptr is GPU-accessible in the queue's VMID via
+ * ATC or SVM. WPTR==RPTR before starting the poll so
+ * the CP starts fetching new commands from the right
+ * place.
+ *
+ * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+ * tricky. Assume that the queue didn't overflow. The
+ * number of valid bits in the 32-bit RPTR depends on
+ * the queue size. The remaining bits are taken from
+ * the saved 64-bit WPTR. If the WPTR wrapped, add the
+ * queue size.
+ */
+ uint32_t queue_size =
+ 2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+ CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+ uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+ if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+ guessed_wptr += queue_size;
+ guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+ guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO),
+ lower_32_bits(guessed_wptr));
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI),
+ upper_32_bits(guessed_wptr));
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR),
+ lower_32_bits((uintptr_t)wptr));
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ upper_32_bits((uintptr_t)wptr));
+ WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1),
+ (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id,
+ queue_id));
+ }
+
+ /* Start the EOP fetcher */
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR),
+ REG_SET_FIELD(m->cp_hqd_eop_rptr,
+ CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE), data);
+
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return 0;
+}
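The WPTR guess in the comment above keeps only the low, queue-size bits of the 32-bit RPTR, adds one queue size if the saved WPTR's low bits indicate a wrap, and takes the remaining high bits from the saved 64-bit WPTR. A minimal sketch of that reconstruction (standalone C with synthetic MQD values, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit write pointer from a 32-bit read pointer and the last
 * saved 64-bit write pointer, assuming the queue did not overflow. */
static uint64_t guess_wptr(uint32_t rptr, uint32_t wptr_lo, uint32_t wptr_hi,
			   uint32_t queue_size /* power of two */)
{
	uint64_t guessed = rptr & (queue_size - 1);

	if ((wptr_lo & (queue_size - 1)) < guessed)
		guessed += queue_size;	/* the WPTR wrapped past the RPTR */
	guessed += wptr_lo & ~(uint64_t)(queue_size - 1);
	guessed += (uint64_t)wptr_hi << 32;

	return guessed;
}

int main(void)
{
	/* Hypothetical 0x1000-entry queue where the WPTR wrapped once. */
	printf("guessed wptr: 0x%llx\n", (unsigned long long)
	       guess_wptr(0x0ff0, 0x1010, 0, 0x1000));	/* -> 0x2ff0 */
	return 0;
}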
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v9_4_3_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+static int kgd_gfx_v9_4_3_validate_trap_override_request(
+ struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
+static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
+{
+ uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
+ uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
+ uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
+ uint32_t ret;
+
+ ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
+
+ return ret;
+}
+
+static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
+{
+ uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ return ret;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v9_4_3_set_wave_launch_trap_override(
+ struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
+
+ data = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+ data = trap_mask_map_sw_to_hw(data);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_v9_4_3_set_address_watch(
+ struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ return watch_address_cntl;
+}
+
+static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ return 0;
+}
+
+const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_4_3_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_gfx_v9_4_3_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_gfx_v9_4_3_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_gfx_v9_4_3_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_gfx_v9_4_3_hqd_sdma_destroy,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base =
+ kgd_gfx_v9_set_vm_context_page_table_base,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .program_trap_handler_settings =
+ kgd_gfx_v9_program_trap_handler_settings,
+ .build_grace_period_packet_info =
+ kgd_gfx_v9_build_grace_period_packet_info,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .enable_debug_trap = kgd_aldebaran_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
+ .validate_trap_override_request =
+ kgd_gfx_v9_4_3_validate_trap_override_request,
+ .set_wave_launch_trap_override =
+ kgd_gfx_v9_4_3_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 9378fc79e9ea..f1f2c24de081 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -21,6 +21,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_gfx_v10.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
@@ -31,6 +32,7 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
+#include <uapi/linux/kfd_ioctl.h>
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -79,7 +81,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -91,7 +93,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
}
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
- unsigned int vmid)
+ unsigned int vmid, uint32_t inst)
{
/*
* We have to assume that there is no outstanding mapping.
@@ -135,7 +137,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
* but still works
*/
-static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
@@ -205,7 +208,7 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
- uint32_t wptr_mask, struct mm_struct *mm)
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
struct v10_compute_mqd *m;
uint32_t *mqd_hqd;
@@ -286,9 +289,9 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t doorbell_off)
+ uint32_t doorbell_off, uint32_t inst)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
int r;
@@ -303,7 +306,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -330,7 +333,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
@@ -338,7 +341,7 @@ out_unlock:
static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs)
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS 56
@@ -469,7 +472,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
@@ -510,7 +513,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
@@ -673,7 +676,7 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
@@ -708,8 +711,298 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
+/*
+ * GFX10 helper for wave launch stall requirements on debug trap setting.
+ *
+ * vmid:
+ * Target VMID to stall/unstall.
+ *
+ * stall:
+ * 0-unstall wave launch (enable), 1-stall wave launch (disable).
+ * After wavefront launch has been stalled, allocated waves must drain from
+ * SPI in order for debug trap settings to take effect on those waves.
+ * This is roughly a ~3500 clock cycle wait on SPI where a read on
+ * SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
+ * KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
+ *
+ * NOTE: We can afford to clear the entire STALL_VMID field on unstall
+ * because current GFX10 chips cannot support multi-process debugging due to
+ * trap configuration and masking being limited to global scope. Always
+ * assume single process conditions.
+ *
+ */
+
+#define KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY 110
+static void kgd_gfx_v10_set_wave_launch_stall(struct amdgpu_device *adev, uint32_t vmid, bool stall)
+{
+ uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+ int i;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
+ stall ? 1 << vmid : 0);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
+
+ if (!stall)
+ return;
+
+ for (i = 0; i < KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+}
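The drain-latency constant above follows directly from the numbers in the comment: roughly 3500 SPI clocks to drain, at about 32 clocks per SPI_GDBG_WAVE_CNTL read, needs about 110 dummy reads. A one-line arithmetic check (standalone C):

#include <stdio.h>

int main(void)
{
	/* ~3500 SPI clocks to drain / ~32 clocks per register read */
	printf("dummy reads needed: %d\n", (3500 + 31) / 32);	/* 110 */
	return 0;
}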
+
+uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ /* assume gfx off is disabled for the debug session if rlc restore not supported. */
+ if (restore_dbg_registers) {
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
+ VMID_SEL, 1 << vmid);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
+ TRAP_EN, 1);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+ }
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+int kgd_gfx_v10_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;
+
+ /* The SPI_GDBG_TRAP_MASK register is global and affects all
+ * processes. Only allow OR-ing the address-watch bit, since
+ * this only affects processes under the debugger. Other bits
+ * should stay 0 to avoid the debugger interfering with other
+ * processes.
+ */
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
+ return -EINVAL;
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data, wave_cntl_prev;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
+ *trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);
+
+ trap_mask_bits = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
+
+ /* We need to preserve wave launch mode stall settings. */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+ bool is_mode_set = !!wave_launch_mode;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ VMID_MASK, is_mode_set ? 1 << vmid : 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ MODE, is_mode_set ? wave_launch_mode : 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
+uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ /* Turning off this watch point until we set all the registers */
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 0);
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ /* Enable the watch point */
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
+
+
+/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
+ * The values read are:
+ * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
+ * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
+ * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
+ * gws_wait_time -- Wait Count for Global Wave Syncs.
+ * que_sleep_wait_time -- Wait Count for Dequeue Retry.
+ * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
+ * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
+ * deq_retry_wait_time -- Wait Count for Global Wave Syncs.
+ */
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst)
+
+{
+ *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
+}
+
+void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data,
+ uint32_t inst)
+{
+ *reg_data = wait_times;
+
+ /*
+ * The CP cannot handle a grace period of 0 and would treat it as an
+ * infinite grace period, so clamp it to 1 to prevent this.
+ */
+ if (grace_period == 0)
+ grace_period = 1;
+
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ grace_period);
+
+ *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
+}
+
static void program_trap_handler_settings(struct amdgpu_device *adev,
- uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -750,5 +1043,14 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v10_set_address_watch,
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
.program_trap_handler_settings = program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
new file mode 100644
index 000000000000..ecaead24e8c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid);
+int kgd_gfx_v10_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported);
+uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev);
+uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
+uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst);
+uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id);
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst);
+void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data,
+ uint32_t inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index ba21ec6b35e0..8c8437a4383f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -22,6 +22,7 @@
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_gfx_v10.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
@@ -80,7 +81,7 @@ static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t v
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -93,7 +94,7 @@ static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t v
/* ATC is defeatured on Sienna_Cichlid */
static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid,
- unsigned int vmid)
+ unsigned int vmid, uint32_t inst)
{
uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
@@ -105,7 +106,8 @@ static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int
return 0;
}
-static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id)
+static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
@@ -177,7 +179,7 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
- uint32_t wptr_mask, struct mm_struct *mm)
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
struct v10_compute_mqd *m;
uint32_t *mqd_hqd;
@@ -273,9 +275,9 @@ static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t doorbell_off)
+ uint32_t doorbell_off, uint32_t inst)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
int r;
@@ -290,7 +292,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -317,7 +319,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
@@ -325,7 +327,7 @@ out_unlock:
static int hqd_dump_v10_3(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs)
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS 56
@@ -456,7 +458,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
@@ -498,7 +500,7 @@ static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev,
static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
@@ -586,7 +588,7 @@ static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
static int wave_control_execute_v10_3(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
@@ -628,7 +630,8 @@ static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
}
static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
- uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -652,142 +655,6 @@ static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
unlock_srbm(adev);
}
-#if 0
-uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev,
- uint32_t trap_debug_wave_launch_mode,
- uint32_t vmid)
-{
- uint32_t data = 0;
- uint32_t orig_wave_cntl_value;
- uint32_t orig_stall_vmid;
-
- mutex_lock(&adev->grbm_idx_mutex);
-
- orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC,
- 0,
- mmSPI_GDBG_WAVE_CNTL));
- orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value,
- SPI_GDBG_WAVE_CNTL,
- STALL_VMID);
-
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
-
- data = 0;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid);
-
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-
-uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev)
-{
- mutex_lock(&adev->grbm_idx_mutex);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
-
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-
-uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev,
- uint32_t trap_override,
- uint32_t trap_mask)
-{
- uint32_t data = 0;
-
- mutex_lock(&adev->grbm_idx_mutex);
-
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
-
- data = 0;
- data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
- EXCP_EN, trap_mask);
- data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
- REPLACE, trap_override);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
-
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
-
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-
-uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev,
- uint8_t wave_launch_mode,
- uint32_t vmid)
-{
- uint32_t data = 0;
- bool is_stall_mode;
- bool is_mode_set;
-
- is_stall_mode = (wave_launch_mode == 4);
- is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4);
-
- mutex_lock(&adev->grbm_idx_mutex);
-
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
- VMID_MASK, is_mode_set ? 1 << vmid : 0);
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
- MODE, is_mode_set ? wave_launch_mode : 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
-
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
- STALL_VMID, is_stall_mode ? 1 << vmid : 0);
- data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
- STALL_RA, is_stall_mode ? 1 : 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
-
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-
-/* kgd_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
- * The values read are:
- * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
- * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
- * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
- * gws_wait_time -- Wait Count for Global Wave Syncs.
- * que_sleep_wait_time -- Wait Count for Dequeue Retry.
- * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
- * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
- * deq_retry_wait_time -- Wait Count for Global Wave Syncs.
- */
-void get_iq_wait_times_v10_3(struct amdgpu_device *adev,
- uint32_t *wait_times)
-
-{
- *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
-}
-
-void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev,
- uint32_t wait_times,
- uint32_t grace_period,
- uint32_t *reg_offset,
- uint32_t *reg_data)
-{
- *reg_data = wait_times;
-
- *reg_data = REG_SET_FIELD(*reg_data,
- CP_IQ_WAIT_TIME2,
- SCH_WAVE,
- grace_period);
-
- *reg_offset = mmCP_IQ_WAIT_TIME2;
-}
-#endif
-
const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v10_3,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
@@ -805,12 +672,13 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
-#if 0
- .enable_debug_trap = enable_debug_trap_v10_3,
- .disable_debug_trap = disable_debug_trap_v10_3,
- .set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3,
- .set_wave_launch_mode = set_wave_launch_mode_v10_3,
- .get_iq_wait_times = get_iq_wait_times_v10_3,
- .build_grace_period_packet_info = build_grace_period_packet_info_v10_3,
-#endif
+ .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info,
+ .enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v10_set_address_watch,
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index 7e80caa05060..d67d003bada2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -30,6 +30,7 @@
#include "soc15d.h"
#include "v11_structs.h"
#include "soc21.h"
+#include <uapi/linux/kfd_ioctl.h>
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -78,7 +79,7 @@ static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmi
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -89,7 +90,7 @@ static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmi
}
static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int pasid,
- unsigned int vmid)
+ unsigned int vmid, uint32_t inst)
{
uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
@@ -101,7 +102,8 @@ static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int p
return 0;
}
-static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id)
+static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
@@ -162,7 +164,7 @@ static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
+ struct mm_struct *mm, uint32_t inst)
{
struct v11_compute_mqd *m;
uint32_t *mqd_hqd;
@@ -258,9 +260,9 @@ static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t doorbell_off)
+ uint32_t doorbell_off, uint32_t inst)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v11_compute_mqd *m;
uint32_t mec, pipe;
int r;
@@ -275,7 +277,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -302,7 +304,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
@@ -310,7 +312,7 @@ out_unlock:
static int hqd_dump_v11(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs)
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS 56
@@ -445,7 +447,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
}
static bool hqd_is_occupied_v11(struct amdgpu_device *adev, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+ uint32_t pipe_id, uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
@@ -486,7 +488,7 @@ static bool hqd_sdma_is_occupied_v11(struct amdgpu_device *adev, void *mqd)
static int hqd_destroy_v11(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
@@ -571,7 +573,7 @@ static int hqd_sdma_destroy_v11(struct amdgpu_device *adev, void *mqd,
static int wave_control_execute_v11(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
@@ -606,6 +608,184 @@ static void set_vm_context_page_table_base_v11(struct amdgpu_device *adev,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
+/*
+ * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
+ *
+ * restore_dbg_registers is ignored here but is a general interface requirement
+ * for devices that support GFXOFF and where the RLC save/restore list
+ * does not support hw registers for debugging i.e. the driver has to manually
+ * initialize the debug mode registers after it has disabled GFX off during the
+ * debug session.
+ */
+static uint32_t kgd_gfx_v11_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
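+
+/* Unlike the GFX9/GFX10 variants, the GFX11 debug-trap helpers in this file
+ * (enable/disable above and the trap-override and wave-launch-mode helpers
+ * below) do not write any registers themselves: they assemble and return the
+ * SPI_GDBG_PER_VMID_CNTL contents, which the caller is expected to apply to
+ * the target VMID. Note that enable and disable currently build the same
+ * value (TRAP_EN=1, EXCP_EN=0, EXCP_REPLACE=0).
+ */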
+
+static int kgd_gfx_v11_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;
+
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 4))
+ *trap_mask_supported |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
+static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
+{
+ uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
+ uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
+ uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
+ uint32_t ret;
+
+ ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
+
+ return ret;
+}
+
+static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
+{
+ uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ return ret;
+}
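+
+/* The two helpers above translate between the KFD_DBG_TRAP_MASK_* bits used
+ * by the KFD debug interface and the hardware layout of
+ * SPI_GDBG_PER_VMID_CNTL: the exception bits land in EXCP_EN, while the
+ * wave-start/wave-end bits land in TRAP_ON_START/TRAP_ON_END.
+ * trap_mask_map_hw_to_sw() is the inverse of trap_mask_map_sw_to_hw() for
+ * exactly this set of bits.
+ */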
+
+/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v11_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
+
+ data = (trap_mask_bits & trap_mask_request) | (*trap_mask_prev & ~trap_mask_request);
+ data = trap_mask_map_sw_to_hw(data);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
+
+static uint32_t kgd_gfx_v11_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode);
+
+ return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ return watch_address_cntl;
+}
+
+static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ return 0;
+}
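+
+/* On GFX11 only the TCP_WATCHn_ADDR_H/ADDR_L registers are written here; the
+ * assembled TCP_WATCHn_CNTL value (with VALID already set) is returned so the
+ * caller can apply it, which is why kgd_gfx_v11_clear_address_watch has
+ * nothing to program and just returns 0.
+ */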
+
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
@@ -622,4 +802,11 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.wave_control_execute = wave_control_execute_v11,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v11,
+ .enable_debug_trap = kgd_gfx_v11_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v11_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v11_set_address_watch,
+ .clear_address_watch = kgd_gfx_v11_clear_address_watch
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index e83cb1c09610..6bf448ab3dff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -78,7 +78,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -91,7 +91,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
}
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
- unsigned int vmid)
+ unsigned int vmid, uint32_t inst)
{
/*
* We have to assume that there is no outstanding mapping.
@@ -114,7 +114,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
return 0;
}
-static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
@@ -158,7 +159,7 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
- uint32_t wptr_mask, struct mm_struct *mm)
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
struct cik_mqd *m;
uint32_t *mqd_hqd;
@@ -202,7 +203,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs)
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
@@ -318,7 +319,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
@@ -358,7 +359,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t temp;
enum hqd_dequeue_request_type type;
@@ -494,7 +495,7 @@ static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
uint32_t data;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 870f352837fc..cd06e4a6d1da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -72,7 +72,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
@@ -85,7 +85,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
}
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
- unsigned int vmid)
+ unsigned int vmid, uint32_t inst)
{
/*
* We have to assume that there is no outstanding mapping.
@@ -109,7 +109,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
return 0;
}
-static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
@@ -153,7 +154,7 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
- uint32_t wptr_mask, struct mm_struct *mm)
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
struct vi_mqd *m;
uint32_t *mqd_hqd;
@@ -226,7 +227,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs)
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
@@ -350,7 +351,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
@@ -390,7 +391,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t temp;
enum hqd_dequeue_request_type type;
@@ -540,7 +541,7 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e92b93557c13..fa5ee96f8845 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -38,6 +38,7 @@
#include "soc15d.h"
#include "gfx_v9_0.h"
#include "amdgpu_amdkfd_gfx_v9.h"
+#include <uapi/linux/kfd_ioctl.h>
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -46,29 +47,29 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
- uint32_t queue, uint32_t vmid)
+static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid, uint32_t inst)
{
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, mec, pipe, queue, vmid);
+ soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst));
}
-static void unlock_srbm(struct amdgpu_device *adev)
+static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst)
{
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
- uint32_t queue_id)
+void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
{
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(adev, mec, pipe, queue_id, 0);
+ kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst);
}
-static uint64_t get_queue_mask(struct amdgpu_device *adev,
+uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
@@ -77,28 +78,28 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
return 1ull << bit;
}
-static void release_queue(struct amdgpu_device *adev)
+void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst)
{
- unlock_srbm(adev);
+ kgd_gfx_v9_unlock_srbm(adev, inst);
}
void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
- lock_srbm(adev, 0, 0, 0, vmid);
+ kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases);
/* APE1 no longer exists on GFX9 */
- unlock_srbm(adev);
+ kgd_gfx_v9_unlock_srbm(adev, inst);
}
int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
- unsigned int vmid)
+ unsigned int vmid, uint32_t inst)
{
/*
* We have to assume that there is no outstanding mapping.
@@ -156,7 +157,8 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
* but still works
*/
-int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
@@ -164,13 +166,13 @@ int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(adev, mec, pipe, 0, 0);
+ kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst);
- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(adev);
+ kgd_gfx_v9_unlock_srbm(adev, inst);
return 0;
}
@@ -220,7 +222,8 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
- uint32_t wptr_mask, struct mm_struct *mm)
+ uint32_t wptr_mask, struct mm_struct *mm,
+ uint32_t inst)
{
struct v9_mqd *m;
uint32_t *mqd_hqd;
@@ -228,21 +231,22 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
m = get_mqd(mqd);
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
- hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+ hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
for (reg = hqd_base;
- reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+ reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL),
+ data);
if (wptr) {
/* Don't read wptr with get_user because the user
@@ -271,43 +275,43 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO),
lower_32_bits(guessed_wptr));
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI),
upper_32_bits(guessed_wptr));
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR),
lower_32_bits((uintptr_t)wptr));
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
- WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
- (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
+ (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR),
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data);
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev, inst);
return 0;
}
int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t doorbell_off)
+ uint32_t doorbell_off, uint32_t inst)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring;
struct v9_mqd *m;
uint32_t mec, pipe;
int r;
m = get_mqd(mqd);
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -315,7 +319,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -342,15 +346,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
- release_queue(adev);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ kgd_gfx_v9_release_queue(adev, inst);
return r;
}
int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs)
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS 56
@@ -365,13 +369,13 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
if (*dump == NULL)
return -ENOMEM;
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
- for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
- reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+ for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
+ reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev, inst);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -481,23 +485,23 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(adev, pipe_id, queue_id);
- act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ act = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
- if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
- high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
+ if (low == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev, inst);
return retval;
}
@@ -522,7 +526,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
@@ -532,10 +536,10 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
if (amdgpu_in_reset(adev))
return -EIO;
- acquire_queue(adev, pipe_id, queue_id);
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
if (m->cp_hqd_vmid == 0)
- WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
+ WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0);
switch (reset_type) {
case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
@@ -552,22 +556,22 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
break;
}
- WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
- temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
+ temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev, inst);
return -ETIME;
}
usleep_range(500, 1000);
}
- release_queue(adev);
+ kgd_gfx_v9_release_queue(adev, inst);
return 0;
}
@@ -624,14 +628,14 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
- WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
- WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
+ WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_CMD, sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
@@ -640,12 +644,274 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
- WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
+ WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
+/*
+ * GFX9 helper for wave launch stall requirements on debug trap setting.
+ *
+ * vmid:
+ * Target VMID to stall/unstall.
+ *
+ * stall:
+ * 0-unstall wave launch (enable), 1-stall wave launch (disable).
+ * After wavefront launch has been stalled, allocated waves must drain from
+ * SPI in order for debug trap settings to take effect on those waves.
+ * This is roughly a ~96 clock cycle wait on SPI where a read on
+ * SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
+ * KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
+ *
+ * NOTE: We can afford to clear the entire STALL_VMID field on unstall
+ * because GFX9.4.1 cannot support multi-process debugging due to trap
+ * configuration and masking being limited to global scope. Always assume
+ * single process conditions.
+ */
+#define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY 3
+void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
+ uint32_t vmid,
+ bool stall)
+{
+ int i;
+ uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
+ stall ? 1 << vmid : 0);
+ else
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA,
+ stall ? 1 : 0);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
+
+ if (!stall)
+ return;
+
+ for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+}
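+
+/* With KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY set to 3, the stall path
+ * above issues three extra reads of SPI_GDBG_WAVE_CNTL at roughly 32 clock
+ * cycles each, covering the ~96 clock cycle SPI drain wait described in the
+ * comment above.
+ */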
+
+/*
+ * restore_dbg_registers is ignored here but is a general interface requirement
+ * for devices that support GFXOFF and where the RLC save/restore list
+ * does not support hw registers for debugging, i.e. the driver has to manually
+ * initialize the debug mode registers after it has disabled GFX off during the
+ * debug session.
+ */
+uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+/*
+ * keep_trap_enabled is ignored here but is a general interface requirement
+ * for devices that support multi-process debugging where the performance
+ * overhead from trap temporary setup needs to be bypassed when the debug
+ * session has ended.
+ */
+uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
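+
+/* On GFX9 the enable and disable paths end up identical: both clear the
+ * global SPI_GDBG_TRAP_MASK register under a wave-launch stall, since trap
+ * configuration on this generation is global rather than per process (see
+ * kgd_gfx_v9_validate_trap_override_request below).
+ */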
+
+int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;
+
+ /* The SPI_GDBG_TRAP_MASK register is global and affects all
+ * processes. Only allow OR-ing the address-watch bit, since
+ * this only affects processes under the debugger. Other bits
+ * should stay 0 to avoid the debugger interfering with other
+ * processes.
+ */
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
+ return -EINVAL;
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_cntl_prev)
+{
+ uint32_t data, wave_cntl_prev;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
+ *trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);
+
+ trap_mask_bits = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
+
+ /* We need to preserve wave launch mode stall settings. */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
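+
+/* The merge above only changes the EXCP_EN bits selected by
+ * trap_mask_request:
+ *
+ *	new = (trap_mask_bits & trap_mask_request) |
+ *	      (*trap_mask_prev & ~trap_mask_request);
+ *
+ * so a debugger requesting only KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH leaves
+ * every other exception-enable bit exactly as previously programmed.
+ */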
+
+uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+ bool is_mode_set = !!wave_launch_mode;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ VMID_MASK, is_mode_set ? 1 << vmid : 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ MODE, is_mode_set ? wave_launch_mode : 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
+uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
+
+ /* Turning off this watch point until we set all the registers */
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 0);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ /* Enable the watch point */
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
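+
+/* Note that watch_address_mask is shifted right by 6 here, whereas the GFX11
+ * variant earlier in this series shifts by 7; the dropped low bits presumably
+ * reflect the minimum address-watch granularity of each generation.
+ */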
+
+uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
+
+/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
+ * The values read are:
+ * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
+ * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
+ * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
+ * gws_wait_time -- Wait Count for Global Wave Syncs.
+ * que_sleep_wait_time -- Wait Count for Dequeue Retry.
+ * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
+ * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
+ * deq_retry_wait_time -- Wait Count for Global Wave Syncs.
+ */
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst)
+
+{
+ *wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ mmCP_IQ_WAIT_TIME2));
+}
+
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
@@ -682,10 +948,11 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
* @queue_idx: Index of queue in the queue-map bit-field
* @wave_cnt: Output parameter updated with number of waves in flight
* @vmid: Output parameter updated with VMID of queue whose wave count
- * is being collected
+ * is being collected
+ * @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
- int *wave_cnt, int *vmid)
+ int *wave_cnt, int *vmid, uint32_t inst)
{
int pipe_idx;
int queue_slot;
@@ -700,12 +967,12 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
*wave_cnt = 0;
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
- soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
- reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst);
+ reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
queue_slot);
*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
if (*wave_cnt != 0)
- *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) &
+ *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) &
CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
}
@@ -718,9 +985,10 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* @adev: Handle of device from which to get number of waves in flight
* @pasid: Identifies the process for which this query call is invoked
* @pasid_wave_cnt: Output parameter updated with number of waves in flight that
- * belong to process with given pasid
+ * belong to process with given pasid
* @max_waves_per_cu: Output parameter updated with maximum number of waves
- * possible per Compute Unit
+ * possible per Compute Unit
+ * @inst: xcc's instance number on a multi-XCC setup
*
* Note: It's possible that the device has too many queues (oversubscription)
* in which case a VMID could be remapped to a different PASID. This could lead
@@ -756,7 +1024,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
* Reading registers referenced above involves programming GRBM appropriately
*/
void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
- int *pasid_wave_cnt, int *max_waves_per_cu)
+ int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst)
{
int qidx;
int vmid;
@@ -772,13 +1040,13 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
lock_spi_csq_mutexes(adev);
- soc15_grbm_select(adev, 1, 0, 0, 0);
+ soc15_grbm_select(adev, 1, 0, 0, 0, inst);
/*
* Iterate through the shader engines and arrays of the device
* to get number of waves in flight
*/
- bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap,
+ bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
KGD_MAX_QUEUES);
max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
@@ -787,8 +1055,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
for (se_idx = 0; se_idx < se_cnt; se_idx++) {
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
- amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
- queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
+ amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst);
+ queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS);
/*
* Assumption: queue map encodes following schema: four
@@ -808,10 +1076,11 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
continue;
/* Get number of waves in flight and aggregate them */
- get_wave_count(adev, qidx, &wave_cnt, &vmid);
+ get_wave_count(adev, qidx, &wave_cnt, &vmid,
+ inst);
if (wave_cnt != 0) {
pasid_tmp =
- RREG32(SOC15_REG_OFFSET(OSSSYS, 0,
+ RREG32(SOC15_REG_OFFSET(OSSSYS, inst,
mmIH_VMID_0_LUT) + vmid);
if (pasid_tmp == pasid)
vmid_wave_cnt += wave_cnt;
@@ -820,8 +1089,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
+ soc15_grbm_select(adev, 0, 0, 0, 0, inst);
unlock_spi_csq_mutexes(adev);
/* Update the output parameters and return */
@@ -830,28 +1099,53 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
adev->gfx.cu_info.max_waves_per_simd;
}
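+
+/* A minimal usage sketch (the caller and the cu_count/occupancy_pct names
+ * below are illustrative only):
+ *
+ *	int waves, max_waves_per_cu;
+ *
+ *	kgd_gfx_v9_get_cu_occupancy(adev, pasid, &waves, &max_waves_per_cu, 0);
+ *	occupancy_pct = waves * 100 / (cu_count * max_waves_per_cu);
+ *
+ * where cu_count is the number of enabled compute units on the device.
+ */
+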
+void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data,
+ uint32_t inst)
+{
+ *reg_data = wait_times;
+
+ /*
+ * The CP cannot handle a 0 grace period input and will result in
+ * an infinite grace period being set, so set it to 1 to prevent this.
+ */
+ if (grace_period == 0)
+ grace_period = 1;
+
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ grace_period);
+
+ *reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ mmCP_IQ_WAIT_TIME2);
+}
+
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
- uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
{
- lock_srbm(adev, 0, 0, 0, vmid);
+ kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
/*
* Program TBA registers
*/
- WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO,
- lower_32_bits(tba_addr >> 8));
- WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI,
- upper_32_bits(tba_addr >> 8));
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO,
+ lower_32_bits(tba_addr >> 8));
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI,
+ upper_32_bits(tba_addr >> 8));
/*
* Program TMA registers
*/
- WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO,
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_LO,
lower_32_bits(tma_addr >> 8));
- WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI,
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_HI,
upper_32_bits(tma_addr >> 8));
- unlock_srbm(adev);
+ kgd_gfx_v9_unlock_srbm(adev, inst);
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
@@ -871,6 +1165,15 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_gfx_v9_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v9_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v9_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index c7ed3bc9053c..936e501908ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -20,41 +20,85 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-
void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
+ uint32_t sh_mem_bases, uint32_t inst);
int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
- unsigned int vmid);
-int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id);
+ unsigned int vmid, uint32_t inst);
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst);
int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
+ struct mm_struct *mm, uint32_t inst);
int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t doorbell_off);
+ uint32_t doorbell_off, uint32_t inst);
int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);
bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id);
+ uint32_t queue_id, uint32_t inst);
int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
+ uint32_t queue_id, uint32_t inst);
int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd);
+ uint32_t sq_cmd, uint32_t inst);
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid);
-
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base);
void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
- int *pasid_wave_cnt, int *max_waves_per_cu);
+ int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst);
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
- uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst);
+void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst);
+uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id);
+void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst);
+void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
+ uint32_t vmid,
+ bool stall);
+uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid);
+int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported);
+uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
+uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev);
+uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst);
+uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id);
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst);
+void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data,
+ uint32_t inst);
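Every prototype in this header gains a trailing uint32_t inst so callers can target one of several GC instances on multi-XCC parts; on the .c side that logical index goes through GET_INST() before register offsets are formed, as the WREG32_SOC15(GC, GET_INST(GC, inst), ...) calls above show. The sketch below only illustrates the idea of a logical-to-physical instance lookup; the table contents and the TOY_GET_INST macro are made up and are not the kernel's GET_INST.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical map from logical GC instance to physical instance; on a part
 * with a single GC this would simply be the identity. */
static const uint32_t gc_inst_map[] = { 0, 1, 2, 3 };

#define TOY_GET_INST(i)	(gc_inst_map[(i)])

/* Stand-in for a register write; in the kernel this would be WREG32_SOC15(). */
static void toy_wreg(uint32_t phys_inst, uint32_t reg, uint32_t val)
{
	printf("inst %u: reg 0x%04x <= 0x%08x\n", phys_inst, reg, val);
}

static void program_tba(uint32_t inst, uint64_t tba_addr)
{
	/* Split the shifted address into LO/HI halves, as the patch does. */
	toy_wreg(TOY_GET_INST(inst), 0x10, (uint32_t)(tba_addr >> 8));
	toy_wreg(TOY_GET_INST(inst), 0x11, (uint32_t)((tba_addr >> 8) >> 32));
}

int main(void)
{
	program_tba(1, 0x123456789000ULL);
	return 0;
}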
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 83a83ced2439..7d6daf8d2bfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -27,6 +27,8 @@
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
+
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
@@ -35,6 +37,7 @@
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
+#include "kfd_priv.h"
#include "kfd_smi_events.h"
/* Userptr restore delay, just long enough to allow consecutive VM
@@ -110,13 +113,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
struct sysinfo si;
uint64_t mem;
+ if (kfd_mem_limit.max_system_mem_limit)
+ return;
+
si_meminfo(&si);
mem = si.freeram - si.freehigh;
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
- kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
(kfd_mem_limit.max_ttm_mem_limit >> 20));
@@ -148,16 +154,20 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
* @size: Size of buffer, in bytes, encapsulated by BO. This should be
* equivalent to amdgpu_bo_size(BO)
* @alloc_flag: Flag used in allocating a BO as noted above
+ * @xcp_id: The XCP (partition) to look up in the XCP manager; the driver
+ * manages each XCP as one compute node exposed to the application
*
- * Return: returns -ENOMEM in case of error, ZERO otherwise
+ * Return:
+ * returns -ENOMEM in case of error, ZERO otherwise
*/
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 alloc_flag)
+ uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t system_mem_needed, ttm_mem_needed, vram_needed;
int ret = 0;
+ uint64_t vram_size = 0;
system_mem_needed = 0;
ttm_mem_needed = 0;
@@ -172,6 +182,17 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* 2M BO chunk.
*/
vram_needed = size;
+ /*
+ * For GFX 9.4.3, get the VRAM size from XCP structs
+ */
+ if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
+ return -EINVAL;
+
+ vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
+ if (adev->gmc.is_app_apu) {
+ system_mem_needed = size;
+ ttm_mem_needed = size;
+ }
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -191,8 +212,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
- (adev && adev->kfd.vram_used + vram_needed >
- adev->gmc.real_vram_size - reserved_for_pt)) {
+ (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
+ vram_size - reserved_for_pt)) {
ret = -ENOMEM;
goto release;
}
@@ -202,9 +223,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
- if (adev) {
- adev->kfd.vram_used += vram_needed;
- adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ if (adev && xcp_id >= 0) {
+ adev->kfd.vram_used[xcp_id] += vram_needed;
+ adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
+ vram_needed :
+ ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
@@ -215,7 +238,7 @@ release:
}
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 alloc_flag)
+ uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
spin_lock(&kfd_mem_limit.mem_limit_lock);
@@ -225,9 +248,19 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
+ if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
+ goto release;
+
if (adev) {
- adev->kfd.vram_used -= size;
- adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ adev->kfd.vram_used[xcp_id] -= size;
+ if (adev->gmc.is_app_apu) {
+ adev->kfd.vram_used_aligned[xcp_id] -= size;
+ kfd_mem_limit.system_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= size;
+ } else {
+ adev->kfd.vram_used_aligned[xcp_id] -=
+ ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
}
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
@@ -237,8 +270,8 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
goto release;
}
- WARN_ONCE(adev && adev->kfd.vram_used < 0,
- "KFD VRAM memory accounting unbalanced");
+ WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
+ "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
"KFD TTM memory accounting unbalanced");
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
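The accounting in the two hunks above becomes per-partition: vram_used and vram_used_aligned are now arrays indexed by xcp_id, and on APP APUs the "VRAM" allocation is additionally charged against the system and TTM budgets because it really lives in GTT. The following is a simplified, self-contained sketch of that bookkeeping; the limits, sizes and variable names are arbitrary and this is not the kernel accounting code.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MAX_XCP 8

static uint64_t vram_size[MAX_XCP];	/* per-partition capacity */
static uint64_t vram_used[MAX_XCP];	/* per-partition usage */
static uint64_t system_used, system_limit = 64ULL << 30;
static bool is_app_apu = true;

static int reserve(int xcp_id, uint64_t size)
{
	uint64_t sys_needed = is_app_apu ? size : 0;

	if (xcp_id < 0 || xcp_id >= MAX_XCP)
		return -1;
	if (vram_used[xcp_id] + size > vram_size[xcp_id] ||
	    system_used + sys_needed > system_limit)
		return -1;	/* would exceed a budget; the kernel returns -ENOMEM here */

	vram_used[xcp_id] += size;
	system_used += sys_needed;
	return 0;
}

static void unreserve(int xcp_id, uint64_t size)
{
	vram_used[xcp_id] -= size;
	if (is_app_apu)
		system_used -= size;	/* APP APU charge is undone on both sides */
}

int main(void)
{
	vram_size[0] = 16ULL << 30;
	printf("reserve 1 GiB on xcp 0: %d\n", reserve(0, 1ULL << 30));
	printf("used on xcp 0: %llu MiB\n",
	       (unsigned long long)(vram_used[0] >> 20));
	unreserve(0, 1ULL << 30);
	return 0;
}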
@@ -254,14 +287,16 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
u32 alloc_flags = bo->kfd_bo->alloc_flags;
u64 size = amdgpu_bo_size(bo);
- amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
+ bo->xcp_id);
kfree(bo->kfd_bo);
}
/**
- * @create_dmamap_sg_bo: Creates a amdgpu_bo object to reflect information
+ * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
* about a USERPTR or DOORBELL or MMIO BO.
+ *
* @adev: Device for which dmamap BO is being created
* @mem: BO of peer device that is being DMA mapped. Provides parameters
* in building the dmamap BO
@@ -285,7 +320,7 @@ create_dmamap_sg_bo(struct amdgpu_device *adev,
ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
- ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
+ ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
amdgpu_bo_unreserve(mem->bo);
@@ -527,6 +562,12 @@ kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+ int ret;
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -659,11 +700,10 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
- struct ttm_operation_ctx ctx = {.interruptible = true};
- struct amdgpu_bo *bo = attachment->bo_va->base.bo;
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ /* This is a no-op. We don't want to trigger eviction fences when
+ * unmapping DMABufs. Therefore the invalidation (moving to system
+ * domain) is done in kfd_mem_dmamap_dmabuf.
+ */
}
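The new comment above explains the trick: instead of bouncing the BO to the system domain on unmap, which would signal eviction fences, the bounce is folded into the next map, so kfd_mem_dmamap_dmabuf() now validates to CPU first and only then to GTT. Here is a toy sketch of the same two-step idea, with made-up domain names and a stub validate() standing in for ttm_bo_validate().

#include <stdio.h>

enum domain { DOM_CPU, DOM_GTT };

struct bo { enum domain dom; };

/* Stand-in for ttm_bo_validate(): just records the placement. */
static int validate(struct bo *bo, enum domain dom)
{
	bo->dom = dom;
	printf("validated to %s\n", dom == DOM_CPU ? "CPU" : "GTT");
	return 0;
}

static int dmamap(struct bo *bo)
{
	int ret;

	/* First hop to CPU so the previous mapping is invalidated... */
	ret = validate(bo, DOM_CPU);
	if (ret)
		return ret;
	/* ...then back to GTT, which rebuilds the mapping from scratch. */
	return validate(bo, DOM_GTT);
}

static void dmaunmap(struct bo *bo)
{
	/* Intentionally a no-op: tearing down here would trigger eviction fences. */
	(void)bo;
}

int main(void)
{
	struct bo bo = { .dom = DOM_GTT };

	dmamap(&bo);
	dmaunmap(&bo);
	return 0;
}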
/**
@@ -804,7 +844,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
* if peer device has large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of peer device
*/
- if ((adev != bo_adev) &&
+ if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -925,28 +965,20 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdkfd_process_info *process_info,
bool userptr)
{
- struct ttm_validate_buffer *entry = &mem->validate_list;
- struct amdgpu_bo *bo = mem->bo;
-
- INIT_LIST_HEAD(&entry->head);
- entry->num_shared = 1;
- entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
- list_add_tail(&entry->head, &process_info->userptr_valid_list);
+ list_add_tail(&mem->validate_list,
+ &process_info->userptr_valid_list);
else
- list_add_tail(&entry->head, &process_info->kfd_bo_list);
+ list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
mutex_unlock(&process_info->lock);
}
static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
struct amdkfd_process_info *process_info)
{
- struct ttm_validate_buffer *bo_list_entry;
-
- bo_list_entry = &mem->validate_list;
mutex_lock(&process_info->lock);
- list_del(&bo_list_entry->head);
+ list_del(&mem->validate_list);
mutex_unlock(&process_info->lock);
}
@@ -1033,13 +1065,12 @@ out:
* object can track VM updates.
*/
struct bo_vm_reservation_context {
- struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
- unsigned int n_vms; /* Number of VMs reserved */
- struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries */
- struct ww_acquire_ctx ticket; /* Reservation ticket */
- struct list_head list, duplicates; /* BO lists */
- struct amdgpu_sync *sync; /* Pointer to sync object */
- bool reserved; /* Whether BOs are reserved */
+ /* DRM execution context for the reservation */
+ struct drm_exec exec;
+ /* Number of VMs reserved */
+ unsigned int n_vms;
+ /* Pointer to sync object */
+ struct amdgpu_sync *sync;
};
enum bo_vm_match {
@@ -1063,35 +1094,26 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
WARN_ON(!vm);
- ctx->reserved = false;
ctx->n_vms = 1;
ctx->sync = &mem->sync;
-
- INIT_LIST_HEAD(&ctx->list);
- INIT_LIST_HEAD(&ctx->duplicates);
-
- ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
- if (!ctx->vm_pd)
- return -ENOMEM;
-
- ctx->kfd_bo.priority = 0;
- ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.num_shared = 1;
- list_add(&ctx->kfd_bo.tv.head, &ctx->list);
-
- amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
-
- ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates);
- if (ret) {
- pr_err("Failed to reserve buffers in ttm.\n");
- kfree(ctx->vm_pd);
- ctx->vm_pd = NULL;
- return ret;
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&ctx->exec) {
+ ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
+
+ ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
}
-
- ctx->reserved = true;
return 0;
+
+error:
+ pr_err("Failed to reserve buffers in ttm.\n");
+ drm_exec_fini(&ctx->exec);
+ return ret;
}
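The conversion from ttm_eu_reserve_buffers() to drm_exec follows one pattern throughout this file: open a drm_exec context, lock every object inside drm_exec_until_all_locked(), and retry the whole body whenever a contended lock forces a back-off, then drm_exec_fini() on both success and error paths. Below is a self-contained caricature of that retry loop using plain trylocks; drm_exec itself is built on ww_mutex/dma_resv locking and is considerably more involved, so treat this purely as an illustration of the control flow.

#include <stdio.h>
#include <stdbool.h>

#define NOBJ 3

static bool locked[NOBJ];

static bool trylock(int i)
{
	if (locked[i])
		return false;
	locked[i] = true;
	return true;
}

static void unlock_all(void)
{
	for (int i = 0; i < NOBJ; i++)
		locked[i] = false;
}

/* Lock every object; on contention drop everything and start over,
 * mirroring what drm_exec_retry_on_contention() does for the caller. */
static int lock_all(void)
{
	bool contended;

	do {
		contended = false;
		for (int i = 0; i < NOBJ; i++) {
			if (!trylock(i)) {
				unlock_all();	/* back off */
				contended = true;
				break;
			}
		}
	} while (contended);

	return 0;
}

int main(void)
{
	lock_all();
	printf("all %d objects locked\n", NOBJ);
	unlock_all();	/* drm_exec_fini() analogue */
	return 0;
}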
/**
@@ -1108,63 +1130,39 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
struct amdgpu_vm *vm, enum bo_vm_match map_type,
struct bo_vm_reservation_context *ctx)
{
- struct amdgpu_bo *bo = mem->bo;
struct kfd_mem_attachment *entry;
- unsigned int i;
+ struct amdgpu_bo *bo = mem->bo;
int ret;
- ctx->reserved = false;
- ctx->n_vms = 0;
- ctx->vm_pd = NULL;
ctx->sync = &mem->sync;
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&ctx->exec) {
+ ctx->n_vms = 0;
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if ((vm && vm != entry->bo_va->base.vm) ||
+ (entry->is_mapped != map_type
+ && map_type != BO_VM_ALL))
+ continue;
- INIT_LIST_HEAD(&ctx->list);
- INIT_LIST_HEAD(&ctx->duplicates);
-
- list_for_each_entry(entry, &mem->attachments, list) {
- if ((vm && vm != entry->bo_va->base.vm) ||
- (entry->is_mapped != map_type
- && map_type != BO_VM_ALL))
- continue;
-
- ctx->n_vms++;
- }
-
- if (ctx->n_vms != 0) {
- ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
- GFP_KERNEL);
- if (!ctx->vm_pd)
- return -ENOMEM;
- }
-
- ctx->kfd_bo.priority = 0;
- ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.num_shared = 1;
- list_add(&ctx->kfd_bo.tv.head, &ctx->list);
-
- i = 0;
- list_for_each_entry(entry, &mem->attachments, list) {
- if ((vm && vm != entry->bo_va->base.vm) ||
- (entry->is_mapped != map_type
- && map_type != BO_VM_ALL))
- continue;
-
- amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
- &ctx->vm_pd[i]);
- i++;
- }
+ ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
+ &ctx->exec, 2);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
+ ++ctx->n_vms;
+ }
- ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates);
- if (ret) {
- pr_err("Failed to reserve buffers in ttm.\n");
- kfree(ctx->vm_pd);
- ctx->vm_pd = NULL;
- return ret;
+ ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
}
-
- ctx->reserved = true;
return 0;
+
+error:
+ pr_err("Failed to reserve buffers in ttm.\n");
+ drm_exec_fini(&ctx->exec);
+ return ret;
}
/**
@@ -1185,15 +1183,8 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
if (wait)
ret = amdgpu_sync_wait(ctx->sync, intr);
- if (ctx->reserved)
- ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
- kfree(ctx->vm_pd);
-
+ drm_exec_fini(&ctx->exec);
ctx->sync = NULL;
-
- ctx->reserved = false;
- ctx->vm_pd = NULL;
-
return ret;
}
@@ -1599,23 +1590,42 @@ out_unlock:
return ret;
}
-size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
+size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
+ uint8_t xcp_id)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
ssize_t available;
+ uint64_t vram_available, system_mem_available, ttm_mem_available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
- available = adev->gmc.real_vram_size
- - adev->kfd.vram_used_aligned
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id]
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
+
+ if (adev->gmc.is_app_apu) {
+ system_mem_available = no_system_mem_limit ?
+ kfd_mem_limit.max_system_mem_limit :
+ kfd_mem_limit.max_system_mem_limit -
+ kfd_mem_limit.system_mem_used;
+
+ ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
+ kfd_mem_limit.ttm_mem_used;
+
+ available = min3(system_mem_available, ttm_mem_available,
+ vram_available);
+ available = ALIGN_DOWN(available, PAGE_SIZE);
+ } else {
+ available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
+ }
+
spin_unlock(&kfd_mem_limit.mem_limit_lock);
if (available < 0)
available = 0;
- return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
+ return available;
}
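On APP APUs the reported free memory above is the minimum of three budgets (system, TTM and partition VRAM) rounded down to a page, while discrete parts keep the VRAM-only math with the coarser VRAM_AVAILABLITY_ALIGN rounding. A small sketch of that min-of-limits computation follows; the numbers are arbitrary and the macro names are local to the example.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ			4096ULL
#define ALIGN_DOWN_TO(x, a)	((x) & ~((a) - 1))

static uint64_t min3(uint64_t a, uint64_t b, uint64_t c)
{
	uint64_t m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	uint64_t system_avail = 10ULL << 30;		/* system budget left */
	uint64_t ttm_avail    = 6ULL << 30;		/* TTM budget left    */
	uint64_t vram_avail   = (8ULL << 30) + 123;	/* partition VRAM left */

	uint64_t avail = ALIGN_DOWN_TO(min3(system_avail, ttm_avail, vram_avail),
				       PAGE_SZ);

	printf("available: %llu bytes\n", (unsigned long long)avail);
	return 0;
}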
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
@@ -1624,6 +1634,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
uint64_t *offset, uint32_t flags, bool criu_resume)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
enum ttm_bo_type bo_type = ttm_bo_type_device;
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
@@ -1631,6 +1642,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
uint64_t aligned_size;
+ int8_t xcp_id = -1;
u64 alloc_flags;
int ret;
@@ -1639,9 +1651,18 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
*/
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
- alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
- alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
+
+ if (adev->gmc.is_app_apu) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_flags = 0;
+ } else {
+ alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+ alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
+ }
+ xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+ 0 : fpriv->xcp_id;
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
@@ -1693,17 +1714,19 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
+ xcp_id);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
- pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
- va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
+ pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
+ va, (*mem)->aql_queue ? size << 1 : size,
+ domain_string(alloc_domain), xcp_id);
ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
- bo_type, NULL, &gobj);
+ bo_type, NULL, &gobj, xcp_id + 1);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
@@ -1728,6 +1751,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
(*mem)->domain = domain;
(*mem)->mapped_to_gpu_memory = 0;
(*mem)->process_info = avm->process_info;
+
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
@@ -1759,7 +1783,7 @@ err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
@@ -1783,7 +1807,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
bool use_release_notifier = (mem->bo->kfd_bo == mem);
struct kfd_mem_attachment *entry, *tmp;
struct bo_vm_reservation_context ctx;
- struct ttm_validate_buffer *bo_list_entry;
unsigned int mapped_to_gpu_memory;
int ret;
bool is_imported = false;
@@ -1811,9 +1834,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
/* Make sure restore workers don't access the BO any more */
- bo_list_entry = &mem->validate_list;
mutex_lock(&process_info->lock);
- list_del(&bo_list_entry->head);
+ list_del(&mem->validate_list);
mutex_unlock(&process_info->lock);
/* Cleanup user pages and MMU notifiers */
@@ -1855,11 +1877,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
/* Update the size of the BO being freed if it was allocated from
- * VRAM and is not imported.
+ * VRAM and is not imported. For APP APUs, VRAM allocations are done
+ * in the GTT domain
*/
if (size) {
- if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
- (!is_imported))
+ if (!is_imported &&
+ (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
+ (adev->gmc.is_app_apu &&
+ mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
*size = 0;
@@ -2282,8 +2307,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
(*mem)->dmabuf = dma_buf;
(*mem)->bo = bo;
(*mem)->va = va;
- (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+
(*mem)->mapped_to_gpu_memory = 0;
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
@@ -2376,14 +2402,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Move all invalidated BOs to the userptr_inval_list */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_valid_list,
- validate_list.head)
+ validate_list)
if (mem->invalid)
- list_move_tail(&mem->validate_list.head,
+ list_move_tail(&mem->validate_list,
&process_info->userptr_inval_list);
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
- validate_list.head) {
+ validate_list) {
invalid = mem->invalid;
if (!invalid)
/* BO hasn't been invalidated since the last
@@ -2445,7 +2471,9 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
ret = -EAGAIN;
goto unlock_out;
}
- mem->invalid = 0;
+ /* mark mem valid only if it has an hmm range associated */
+ if (mem->range)
+ mem->invalid = 0;
}
unlock_out:
@@ -2461,50 +2489,41 @@ unlock_out:
*/
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
- struct amdgpu_bo_list_entry *pd_bo_list_entries;
- struct list_head resv_list, duplicates;
- struct ww_acquire_ctx ticket;
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_sync sync;
+ struct drm_exec exec;
struct amdgpu_vm *peer_vm;
struct kgd_mem *mem, *tmp_mem;
struct amdgpu_bo *bo;
- struct ttm_operation_ctx ctx = { false, false };
- int i, ret;
-
- pd_bo_list_entries = kcalloc(process_info->n_vms,
- sizeof(struct amdgpu_bo_list_entry),
- GFP_KERNEL);
- if (!pd_bo_list_entries) {
- pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
- ret = -ENOMEM;
- goto out_no_mem;
- }
-
- INIT_LIST_HEAD(&resv_list);
- INIT_LIST_HEAD(&duplicates);
+ int ret;
- /* Get all the page directory BOs that need to be reserved */
- i = 0;
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
- &pd_bo_list_entries[i++]);
- /* Add the userptr_inval_list entries to resv_list */
- list_for_each_entry(mem, &process_info->userptr_inval_list,
- validate_list.head) {
- list_add_tail(&mem->resv_list.head, &resv_list);
- mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.num_shared = mem->validate_list.num_shared;
- }
+ amdgpu_sync_create(&sync);
+ drm_exec_init(&exec, 0);
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
- WARN(!list_empty(&duplicates), "Duplicates should be empty");
- if (ret)
- goto out_free;
+ drm_exec_until_all_locked(&exec) {
+ /* Reserve all the page directories */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unreserve_out;
+ }
- amdgpu_sync_create(&sync);
+ /* Reserve all userptr_inval_list entries */
+ list_for_each_entry(mem, &process_info->userptr_inval_list,
+ validate_list) {
+ struct drm_gem_object *gobj;
+
+ gobj = &mem->bo->tbo.base;
+ ret = drm_exec_prepare_obj(&exec, gobj, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unreserve_out;
+ }
+ }
ret = process_validate_vms(process_info);
if (ret)
@@ -2513,7 +2532,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
/* Validate BOs and update GPUVM page tables */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
- validate_list.head) {
+ validate_list) {
struct kfd_mem_attachment *attachment;
bo = mem->bo;
@@ -2555,12 +2574,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
ret = process_update_pds(process_info, &sync);
unreserve_out:
- ttm_eu_backoff_reservation(&ticket, &resv_list);
+ drm_exec_fini(&exec);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
-out_free:
- kfree(pd_bo_list_entries);
-out_no_mem:
return ret;
}
@@ -2576,9 +2592,16 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
- validate_list.head) {
- bool valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ validate_list) {
+ bool valid;
+
+ /* keep mem without an hmm range on the userptr_inval_list */
+ if (!mem->range)
+ continue;
+
+ /* Only check mem with hmm range associated */
+ valid = amdgpu_ttm_tt_get_user_pages_done(
+ mem->bo->tbo.ttm, mem->range);
mem->range = NULL;
if (!valid) {
@@ -2586,9 +2609,14 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
ret = -EAGAIN;
continue;
}
- WARN(mem->invalid, "Valid BO is marked invalid");
- list_move_tail(&mem->validate_list.head,
+ if (mem->invalid) {
+ WARN(1, "Valid BO is marked invalid");
+ ret = -EAGAIN;
+ continue;
+ }
+
+ list_move_tail(&mem->validate_list,
&process_info->userptr_valid_list);
}
@@ -2698,50 +2726,44 @@ unlock_out:
*/
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
- struct amdgpu_bo_list_entry *pd_bo_list;
struct amdkfd_process_info *process_info = info;
struct amdgpu_vm *peer_vm;
struct kgd_mem *mem;
- struct bo_vm_reservation_context ctx;
struct amdgpu_amdkfd_fence *new_fence;
- int ret = 0, i;
struct list_head duplicate_save;
struct amdgpu_sync sync_obj;
unsigned long failed_size = 0;
unsigned long total_size = 0;
+ struct drm_exec exec;
+ int ret;
INIT_LIST_HEAD(&duplicate_save);
- INIT_LIST_HEAD(&ctx.list);
- INIT_LIST_HEAD(&ctx.duplicates);
- pd_bo_list = kcalloc(process_info->n_vms,
- sizeof(struct amdgpu_bo_list_entry),
- GFP_KERNEL);
- if (!pd_bo_list)
- return -ENOMEM;
-
- i = 0;
mutex_lock(&process_info->lock);
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
- /* Reserve all BOs and page tables/directory. Add all BOs from
- * kfd_bo_list to ctx.list
- */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head) {
-
- list_add_tail(&mem->resv_list.head, &ctx.list);
- mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.num_shared = mem->validate_list.num_shared;
- }
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto ttm_reserve_fail;
+ }
- ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save);
- if (ret) {
- pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
- goto ttm_reserve_fail;
+ /* Reserve all BOs and page tables/directories. Add all BOs from
+ * kfd_bo_list to the drm_exec locking context
+ */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct drm_gem_object *gobj;
+
+ gobj = &mem->bo->tbo.base;
+ ret = drm_exec_prepare_obj(&exec, gobj, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto ttm_reserve_fail;
+ }
}
amdgpu_sync_create(&sync_obj);
@@ -2759,7 +2781,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
/* Validate BOs and map them to GPUVM (update VM page tables). */
list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head) {
+ validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
@@ -2792,6 +2814,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (!attachment->is_mapped)
continue;
+ if (attachment->bo_va->base.bo->tbo.pin_count)
+ continue;
+
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
@@ -2832,8 +2857,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
*ef = dma_fence_get(&new_fence->base);
/* Attach new eviction fence to all BOs except pinned ones */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head) {
+ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
if (mem->bo->tbo.pin_count)
continue;
@@ -2852,11 +2876,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
validate_map_fail:
- ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
+ drm_exec_fini(&exec);
mutex_unlock(&process_info->lock);
- kfree(pd_bo_list);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9ba4817a9148..73ee14f7a9a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
- return sysfs_emit(buf, "%s\n", ctx->vbios_version);
+ return sysfs_emit(buf, "%s\n", ctx->vbios_ver_str);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
@@ -1791,6 +1791,15 @@ const struct attribute_group amdgpu_vbios_version_attr_group = {
.attrs = amdgpu_vbios_version_attrs
};
+int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context)
+ return devm_device_add_group(adev->dev,
+ &amdgpu_vbios_version_attr_group);
+
+ return 0;
+}
+
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 4153d520e2a3..0811474e8fd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -89,8 +89,7 @@ struct atom_memory_info {
#define MAX_AC_TIMING_ENTRIES 16
-struct atom_memory_clock_range_table
-{
+struct atom_memory_clock_range_table {
u8 num_entries;
u8 rsv[3];
u32 mclk[MAX_AC_TIMING_ENTRIES];
@@ -118,14 +117,12 @@ struct atom_mc_reg_table {
#define MAX_VOLTAGE_ENTRIES 32
-struct atom_voltage_table_entry
-{
+struct atom_voltage_table_entry {
u16 value;
u32 smio_low;
};
-struct atom_voltage_table
-{
+struct atom_voltage_table {
u32 count;
u32 mask_low;
u32 phase_delay;
@@ -217,5 +214,6 @@ int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
void amdgpu_atombios_fini(struct amdgpu_device *adev);
int amdgpu_atombios_init(struct amdgpu_device *adev);
+int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index ac6fe0ae4609..835980e94b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -58,7 +58,7 @@ uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *ade
if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
index, &size, &frev, &crev, &data_offset)) {
/* support firmware_info 3.1 + */
- if ((frev == 3 && crev >=1) || (frev > 3)) {
+ if ((frev == 3 && crev >= 1) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
@@ -272,6 +272,7 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
break;
case ATOM_DGPU_VRAM_TYPE_HBM2:
case ATOM_DGPU_VRAM_TYPE_HBM2E:
+ case ATOM_DGPU_VRAM_TYPE_HBM3:
vram_type = AMDGPU_VRAM_TYPE_HBM;
break;
case ATOM_DGPU_VRAM_TYPE_GDDR6:
@@ -326,10 +327,13 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = igp_info->v11.umachannelnumber;
if (!mem_channel_number)
mem_channel_number = 1;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
@@ -344,10 +348,13 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = igp_info->v21.umachannelnumber;
if (!mem_channel_number)
mem_channel_number = 1;
- /* channel width is 64 */
- if (vram_width)
- *vram_width = mem_channel_number * 64;
mem_type = igp_info->v21.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
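The IGP paths above now derive the channel width from the memory type instead of hard-coding 64 bits: LPDDR5 channels are 32 bits wide, everything else stays at 64, and a channel count of zero still falls back to 1. The arithmetic is small enough to show directly; LpDdr5MemType is the atomfirmware enum referenced in the diff, but the numeric value used below is only a stand-in.

#include <stdio.h>
#include <stdint.h>

#define TOY_LPDDR5_MEM_TYPE	0x5	/* placeholder, not the real enum value */

static uint32_t vram_width(uint32_t channels, uint32_t mem_type)
{
	uint32_t channel_width = (mem_type == TOY_LPDDR5_MEM_TYPE) ? 32 : 64;

	if (!channels)
		channels = 1;		/* same fallback as the kernel code */
	return channels * channel_width;
}

int main(void)
{
	printf("8 x LPDDR5 channels -> %u-bit bus\n",
	       vram_width(8, TOY_LPDDR5_MEM_TYPE));
	printf("8 x other  channels -> %u-bit bus\n", vram_width(8, 0));
	return 0;
}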
@@ -590,7 +597,7 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
index, &size, &frev, &crev,
&data_offset)) {
/* support firmware_info 3.4 + */
- if ((frev == 3 && crev >=4) || (frev > 3)) {
+ if ((frev == 3 && crev >= 4) || (frev > 3)) {
firmware_info = (union firmware_info *)
(mode_info->atom_context->bios + data_offset);
/* The ras_rom_i2c_slave_addr should ideally
@@ -843,7 +850,7 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
firmware_info = (union firmware_info *)(ctx->bios + data_offset);
- if (frev !=3)
+ if (frev != 3)
return -EINVAL;
switch (crev) {
@@ -902,7 +909,7 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
}
index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
- asic_init);
+ asic_init);
if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
if (frev == 2 && crev >= 1) {
memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index d6d986be906a..375f02002579 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -74,24 +74,29 @@ struct atpx_mux {
u16 mux;
} __packed;
-bool amdgpu_has_atpx(void) {
+bool amdgpu_has_atpx(void)
+{
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+bool amdgpu_has_atpx_dgpu_power_cntl(void)
+{
return amdgpu_atpx_priv.atpx.functions.power_cntl;
}
-bool amdgpu_is_atpx_hybrid(void) {
+bool amdgpu_is_atpx_hybrid(void)
+{
return amdgpu_atpx_priv.atpx.is_hybrid;
}
-bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+bool amdgpu_atpx_dgpu_req_power_for_displays(void)
+{
return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}
#if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void) {
+void *amdgpu_atpx_get_dhandle(void)
+{
return amdgpu_atpx_priv.dhandle;
}
#endif
@@ -134,7 +139,7 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk("failed to evaluate ATPX got %s\n",
+ pr_err("failed to evaluate ATPX got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
@@ -190,7 +195,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
- printk("ATPX buffer is too small: %zu\n", size);
+ pr_err("ATPX buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
@@ -223,11 +228,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
- printk("ATPX Hybrid Graphics, forcing to ATPX\n");
+ pr_warn("ATPX Hybrid Graphics, forcing to ATPX\n");
atpx->functions.power_cntl = true;
atpx->is_hybrid = false;
} else {
- printk("ATPX Hybrid Graphics\n");
+ pr_notice("ATPX Hybrid Graphics\n");
/*
* Disable legacy PM methods only when pcie port PM is usable,
* otherwise the device might fail to power off or power on.
@@ -269,7 +274,7 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
- printk("ATPX buffer is too small: %zu\n", size);
+ pr_err("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -278,8 +283,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u, functions 0x%08x\n",
- output.version, output.function_bits);
+ pr_notice("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 30c28a69e847..38ccec913f00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -104,9 +104,8 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
bios = ioremap_wc(vram_base, size);
- if (!bios) {
+ if (!bios)
return false;
- }
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
@@ -133,9 +132,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
adev->bios = NULL;
/* XXX: some cards may return 0 for rom size? ddx has a workaround */
bios = pci_map_rom(adev->pdev, &size);
- if (!bios) {
+ if (!bios)
return false;
- }
adev->bios = kzalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
@@ -168,9 +166,9 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
header[AMD_VBIOS_SIGNATURE_END] = 0;
if ((!AMD_IS_VALID_VBIOS(header)) ||
- 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
- AMD_VBIOS_SIGNATURE,
- strlen(AMD_VBIOS_SIGNATURE)))
+ memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
+ AMD_VBIOS_SIGNATURE,
+ strlen(AMD_VBIOS_SIGNATURE)) != 0)
return false;
/* valid vbios, go on */
@@ -264,7 +262,7 @@ static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+ DRM_ERROR("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
@@ -363,7 +361,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
- unsigned offset;
+ unsigned int offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
@@ -462,7 +460,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
return false;
success:
- adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false;
+ adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 252a876b0725..b6298e901cbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -28,6 +28,7 @@
* Christian König <[email protected]>
*/
+#include <linux/sort.h>
#include <linux/uaccess.h>
#include "amdgpu.h"
@@ -50,13 +51,20 @@ static void amdgpu_bo_list_free(struct kref *ref)
refcount);
struct amdgpu_bo_list_entry *e;
- amdgpu_bo_list_for_each_entry(e, list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->bo);
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+}
- amdgpu_bo_unref(&bo);
- }
+static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b)
+{
+ const struct amdgpu_bo_list_entry *a = _a, *b = _b;
- call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
+ if (a->priority > b->priority)
+ return 1;
+ if (a->priority < b->priority)
+ return -1;
+ return 0;
}
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
@@ -118,7 +126,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
- entry->tv.bo = &bo->tbo;
+ entry->bo = bo;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
@@ -133,6 +141,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
list->first_userptr = first_userptr;
list->num_entries = num_entries;
+ sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry),
+ amdgpu_bo_list_entry_cmp, NULL);
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
@@ -141,16 +151,10 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
return 0;
error_free:
- for (i = 0; i < last_entry; ++i) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
- amdgpu_bo_unref(&bo);
- }
- for (i = first_userptr; i < num_entries; ++i) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
-
- amdgpu_bo_unref(&bo);
- }
+ for (i = 0; i < last_entry; ++i)
+ amdgpu_bo_unref(&array[i].bo);
+ for (i = first_userptr; i < num_entries; ++i)
+ amdgpu_bo_unref(&array[i].bo);
kvfree(list);
return r;
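With amdgpu_bo_list_get_list() removed, entries are ordered once at creation time with sort() from linux/sort.h and the small priority comparator added above. The user-space sketch below shows the same comparator shape driven by qsort(); the struct and field names are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct entry {
	uint32_t priority;
	const char *name;
};

/* Same shape as amdgpu_bo_list_entry_cmp(): ascending order by priority. */
static int entry_cmp(const void *_a, const void *_b)
{
	const struct entry *a = _a, *b = _b;

	if (a->priority > b->priority)
		return 1;
	if (a->priority < b->priority)
		return -1;
	return 0;
}

int main(void)
{
	struct entry e[] = {
		{ 2, "scratch" }, { 0, "vertex" }, { 1, "texture" },
	};

	qsort(e, sizeof(e) / sizeof(e[0]), sizeof(e[0]), entry_cmp);
	for (size_t i = 0; i < sizeof(e) / sizeof(e[0]); i++)
		printf("%u %s\n", e[i].priority, e[i].name);
	return 0;
}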
@@ -182,41 +186,6 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
return -ENOENT;
}
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated)
-{
- /* This is based on the bucket sort with O(n) time complexity.
- * An item with priority "i" is added to bucket[i]. The lists are then
- * concatenated in descending order.
- */
- struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
- struct amdgpu_bo_list_entry *e;
- unsigned i;
-
- for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
- INIT_LIST_HEAD(&bucket[i]);
-
- /* Since buffers which appear sooner in the relocation list are
- * likely to be used more often than buffers which appear later
- * in the list, the sort mustn't change the ordering of buffers
- * with the same priority, i.e. it must be stable.
- */
- amdgpu_bo_list_for_each_entry(e, list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- unsigned priority = e->priority;
-
- if (!bo->parent)
- list_add_tail(&e->tv.head, &bucket[priority]);
-
- e->user_pages = NULL;
- e->range = NULL;
- }
-
- /* Connect the sorted buckets in the output list. */
- for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
- list_splice(&bucket[i], validated);
-}
-
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
kref_put(&list->refcount, amdgpu_bo_list_free);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index ededdc01ca28..26c01cb131f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -23,7 +23,6 @@
#ifndef __AMDGPU_BO_LIST_H__
#define __AMDGPU_BO_LIST_H__
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
struct hmm_range;
@@ -36,7 +35,7 @@ struct amdgpu_bo_va;
struct amdgpu_fpriv;
struct amdgpu_bo_list_entry {
- struct ttm_validate_buffer tv;
+ struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
@@ -60,8 +59,6 @@ struct amdgpu_bo_list {
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
struct amdgpu_bo_list **result);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 456e385333b6..b8280be6225d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -41,13 +41,13 @@ struct amdgpu_cgs_device {
((struct amdgpu_cgs_device *)cgs_device)->adev
-static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
+static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned int offset)
{
CGS_FUNC_ADEV;
return RREG32(offset);
}
-static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
+static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned int offset,
uint32_t value)
{
CGS_FUNC_ADEV;
@@ -56,7 +56,7 @@ static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned of
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
- unsigned index)
+ unsigned int index)
{
CGS_FUNC_ADEV;
switch (space) {
@@ -84,7 +84,7 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
- unsigned index, uint32_t value)
+ unsigned int index, uint32_t value)
{
CGS_FUNC_ADEV;
switch (space) {
@@ -163,38 +163,38 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
uint16_t fw_version = 0;
switch (type) {
- case CGS_UCODE_ID_SDMA0:
- fw_version = adev->sdma.instance[0].fw_version;
- break;
- case CGS_UCODE_ID_SDMA1:
- fw_version = adev->sdma.instance[1].fw_version;
- break;
- case CGS_UCODE_ID_CP_CE:
- fw_version = adev->gfx.ce_fw_version;
- break;
- case CGS_UCODE_ID_CP_PFP:
- fw_version = adev->gfx.pfp_fw_version;
- break;
- case CGS_UCODE_ID_CP_ME:
- fw_version = adev->gfx.me_fw_version;
- break;
- case CGS_UCODE_ID_CP_MEC:
- fw_version = adev->gfx.mec_fw_version;
- break;
- case CGS_UCODE_ID_CP_MEC_JT1:
- fw_version = adev->gfx.mec_fw_version;
- break;
- case CGS_UCODE_ID_CP_MEC_JT2:
- fw_version = adev->gfx.mec_fw_version;
- break;
- case CGS_UCODE_ID_RLC_G:
- fw_version = adev->gfx.rlc_fw_version;
- break;
- case CGS_UCODE_ID_STORAGE:
- break;
- default:
- DRM_ERROR("firmware type %d do not have version\n", type);
- break;
+ case CGS_UCODE_ID_SDMA0:
+ fw_version = adev->sdma.instance[0].fw_version;
+ break;
+ case CGS_UCODE_ID_SDMA1:
+ fw_version = adev->sdma.instance[1].fw_version;
+ break;
+ case CGS_UCODE_ID_CP_CE:
+ fw_version = adev->gfx.ce_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_PFP:
+ fw_version = adev->gfx.pfp_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_ME:
+ fw_version = adev->gfx.me_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT1:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT2:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_RLC_G:
+ fw_version = adev->gfx.rlc_fw_version;
+ break;
+ case CGS_UCODE_ID_STORAGE:
+ break;
+ default:
+ DRM_ERROR("firmware type %d do not have version\n", type);
+ break;
}
return fw_version;
}
@@ -205,7 +205,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
- if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
+ if (type != CGS_UCODE_ID_SMU && type != CGS_UCODE_ID_SMU_SK) {
uint64_t gpu_addr;
uint32_t data_size;
const struct gfx_firmware_header_v1_0 *header;
@@ -232,7 +232,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- if (CGS_UCODE_ID_CP_MEC == type)
+ if (type == CGS_UCODE_ID_CP_MEC)
info->image_size = le32_to_cpu(header->jt_offset) << 2;
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 6be30dcb029d..d34037b85cf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -593,11 +593,20 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
switch (val) {
default:
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ case DRM_MODE_SCALE_NONE:
+ rmx_type = RMX_OFF;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
}
+
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
@@ -799,12 +808,21 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
}
switch (value) {
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_NONE:
+ rmx_type = RMX_OFF;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
default:
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
}
+
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
@@ -1127,7 +1145,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* assume digital unless load detected otherwise */
amdgpu_connector->use_digital = true;
lret = encoder_funcs->detect(encoder, connector);
- DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+ DRM_DEBUG_KMS("load_detect %x returned: %x\n",
+ encoder->encoder_type, lret);
if (lret == connector_status_connected)
amdgpu_connector->use_digital = false;
}
@@ -1991,7 +2010,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
if (i2c_bus->valid) {
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2eb2c66843a8..49dd9aa8da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -65,6 +65,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
}
amdgpu_sync_create(&p->sync);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
return 0;
}
@@ -112,6 +113,9 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
if (r < 0)
return r;
+ if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
+ return -EINVAL;
+
++(num_ibs[r]);
p->gang_leader_idx = r;
return 0;
@@ -122,7 +126,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
uint32_t *offset)
{
struct drm_gem_object *gobj;
- struct amdgpu_bo *bo;
unsigned long size;
int r;
@@ -130,21 +133,16 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
if (gobj == NULL)
return -EINVAL;
- bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- p->uf_entry.priority = 0;
- p->uf_entry.tv.bo = &bo->tbo;
- /* One for TTM and two for the CS job */
- p->uf_entry.tv.num_shared = 3;
-
+ p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
drm_gem_object_put(gobj);
- size = amdgpu_bo_size(bo);
+ size = amdgpu_bo_size(p->uf_bo);
if (size != PAGE_SIZE || (data->offset + 8) > size) {
r = -EINVAL;
goto error_unref;
}
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+ if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
r = -EINVAL;
goto error_unref;
}
@@ -154,7 +152,7 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
return 0;
error_unref:
- amdgpu_bo_unref(&bo);
+ amdgpu_bo_unref(&p->uf_bo);
return r;
}
@@ -192,7 +190,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
- unsigned int size;
+ size_t size;
int ret;
int i;
@@ -285,6 +283,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
break;
default:
@@ -294,7 +293,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
if (!p->gang_size) {
ret = -EINVAL;
- goto free_partial_kdata;
+ goto free_all_kdata;
}
for (i = 0; i < p->gang_size; ++i) {
@@ -305,12 +304,12 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
p->gang_leader = p->jobs[p->gang_leader_idx];
- if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
+ if (p->ctx->generation != p->gang_leader->generation) {
ret = -ECANCELED;
goto free_all_kdata;
}
- if (p->uf_entry.tv.bo)
+ if (p->uf_bo)
p->gang_leader->uf_addr = uf_offset;
kvfree(chunk_array);
@@ -355,7 +354,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ib = &job->ibs[job->num_ibs++];
/* MM engine doesn't support user fences */
- if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
+ if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
@@ -393,7 +392,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
{
struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned num_deps;
+ unsigned int num_deps;
int i, r;
num_deps = chunk->length_dw * 4 /
@@ -464,7 +463,7 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i, r;
num_deps = chunk->length_dw * 4 /
@@ -482,7 +481,7 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i, r;
num_deps = chunk->length_dw * 4 /
@@ -502,7 +501,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i;
num_deps = chunk->length_dw * 4 /
@@ -536,7 +535,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i;
num_deps = chunk->length_dw * 4 /
@@ -575,6 +574,26 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
return 0;
}
+static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
+ int i;
+
+ if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
+ return -EINVAL;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ p->jobs[i]->shadow_va = shadow->shadow_va;
+ p->jobs[i]->csa_va = shadow->csa_va;
+ p->jobs[i]->gds_va = shadow->gds_va;
+ p->jobs[i]->init_shadow =
+ shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
+ }
+
+ return 0;
+}
+
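amdgpu_cs_p2_shadow() follows the usual chunk-parsing pattern: reject any flag bit the kernel does not understand, then copy the chunk's addresses and the init-shadow bit into every job of the gang. A compact sketch of that reject-unknown-flags check is below; the constants and struct are invented for the example and are not the UAPI definitions.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define FLAG_INIT_SHADOW	0x1	/* the only bit this parser understands */

struct shadow_chunk {
	uint64_t shadow_va, csa_va, gds_va;
	uint64_t flags;
};

static int parse_shadow(const struct shadow_chunk *c, int *init_shadow)
{
	/* Any bit outside the supported mask makes the whole submission invalid. */
	if (c->flags & ~(uint64_t)FLAG_INIT_SHADOW)
		return -EINVAL;

	*init_shadow = !!(c->flags & FLAG_INIT_SHADOW);
	return 0;
}

int main(void)
{
	struct shadow_chunk ok = { .flags = FLAG_INIT_SHADOW };
	struct shadow_chunk bad = { .flags = 0x6 };
	int init;

	printf("ok:  %d\n", parse_shadow(&ok, &init));
	printf("bad: %d\n", parse_shadow(&bad, &init));
	return 0;
}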
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
unsigned int ce_preempt = 0, de_preempt = 0;
@@ -617,6 +636,11 @@ static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
if (r)
return r;
break;
+ case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
+ r = amdgpu_cs_p2_shadow(p, chunk);
+ if (r)
+ return r;
+ break;
}
}
@@ -729,6 +753,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
+
adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
increment_us, us_upper_bound);
@@ -814,55 +839,18 @@ retry:
return r;
}
-static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
- struct list_head *validated)
-{
- struct ttm_operation_ctx ctx = { true, false };
- struct amdgpu_bo_list_entry *lobj;
- int r;
-
- list_for_each_entry(lobj, validated, tv.head) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
- struct mm_struct *usermm;
-
- usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
- if (usermm && usermm != current->mm)
- return -EPERM;
-
- if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
- lobj->user_invalidated && lobj->user_pages) {
- amdgpu_bo_placement_from_domain(bo,
- AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- return r;
-
- amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
- lobj->user_pages);
- }
-
- r = amdgpu_cs_bo_validate(p, bo);
- if (r)
- return r;
-
- kvfree(lobj->user_pages);
- lobj->user_pages = NULL;
- }
- return 0;
-}
-
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
- struct list_head duplicates;
+ struct drm_gem_object *obj;
+ unsigned long index;
unsigned int i;
int r;
- INIT_LIST_HEAD(&p->validated);
-
/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
if (cs->in.bo_list_handle) {
if (p->bo_list)
@@ -882,25 +870,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
mutex_lock(&p->bo_list->bo_list_mutex);
- /* One for TTM and one for the CS job */
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->tv.num_shared = 2;
-
- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
-
- INIT_LIST_HEAD(&duplicates);
- amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
-
- if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
- list_add(&p->uf_entry.tv.head, &p->validated);
-
/* Get userptr backing pages. If pages are updated after registered
* in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
* amdgpu_ttm_backend_bind() to flush and invalidate new pages
*/
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
bool userpage_invalidated = false;
+ struct amdgpu_bo *bo = e->bo;
int i;
e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
@@ -928,18 +904,56 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->user_invalidated = userpage_invalidated;
}
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
- goto out_free_user_pages;
+ drm_exec_until_all_locked(&p->exec) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
+ drm_exec_retry_on_contention(&p->exec);
+ if (unlikely(r))
+ goto out_free_user_pages;
+
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ /* One fence for TTM and one for each CS job */
+ r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
+ 1 + p->gang_size);
+ drm_exec_retry_on_contention(&p->exec);
+ if (unlikely(r))
+ goto out_free_user_pages;
+
+ e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
+ }
+
+ if (p->uf_bo) {
+ r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
+ 1 + p->gang_size);
+ drm_exec_retry_on_contention(&p->exec);
+ if (unlikely(r))
+ goto out_free_user_pages;
+ }
}
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct mm_struct *usermm;
+
+ usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
+ if (usermm && usermm != current->mm) {
+ r = -EPERM;
+ goto out_free_user_pages;
+ }
- e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
+ e->user_invalidated && e->user_pages) {
+ amdgpu_bo_placement_from_domain(e->bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
+ &ctx);
+ if (r)
+ goto out_free_user_pages;
+
+ amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
+ e->user_pages);
+ }
+
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -951,25 +965,21 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_bo_validate, p);
if (r) {
DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
- goto error_validate;
+ goto out_free_user_pages;
}
- r = amdgpu_cs_list_validate(p, &duplicates);
- if (r)
- goto error_validate;
-
- r = amdgpu_cs_list_validate(p, &p->validated);
- if (r)
- goto error_validate;
-
- if (p->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+ drm_exec_for_each_locked_object(&p->exec, index, obj) {
+ r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
+ if (unlikely(r))
+ goto out_free_user_pages;
+ }
- r = amdgpu_ttm_alloc_gart(&uf->tbo);
- if (r)
- goto error_validate;
+ if (p->uf_bo) {
+ r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
+ if (unlikely(r))
+ goto out_free_user_pages;
- p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
+ p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
}
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
@@ -981,12 +991,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list->oa_obj);
return 0;
-error_validate:
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ struct amdgpu_bo *bo = e->bo;
if (!e->user_pages)
continue;
@@ -1047,9 +1054,8 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
/* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
+ if (r)
return r;
- }
kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
@@ -1093,7 +1099,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo *bo;
unsigned int i;
int r;
@@ -1122,11 +1127,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- /* ignore duplicates */
- bo = ttm_to_amdgpu_bo(e->tv.bo);
- if (!bo)
- continue;
-
bo_va = e->bo_va;
if (bo_va == NULL)
continue;
@@ -1164,7 +1164,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ struct amdgpu_bo *bo = e->bo;
/* ignore duplicates */
if (!bo)
@@ -1181,8 +1181,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_gpu_scheduler *sched;
- struct amdgpu_bo_list_entry *e;
+ struct drm_gem_object *obj;
struct dma_fence *fence;
+ unsigned long index;
unsigned int i;
int r;
@@ -1193,8 +1194,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
- list_for_each_entry(e, &p->validated, tv.head) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ drm_exec_for_each_locked_object(&p->exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
struct dma_resv *resv = bo->tbo.base.resv;
enum amdgpu_sync_mode sync_mode;
@@ -1258,6 +1260,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_job *leader = p->gang_leader;
struct amdgpu_bo_list_entry *e;
+ struct drm_gem_object *gobj;
+ unsigned long index;
unsigned int i;
uint64_t seq;
int r;
@@ -1296,9 +1300,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-
- r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
+ e->range);
e->range = NULL;
}
if (r) {
@@ -1308,20 +1311,22 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
p->fence = dma_fence_get(&leader->base.s_fence->finished);
- list_for_each_entry(e, &p->validated, tv.head) {
+ drm_exec_for_each_locked_object(&p->exec, index, gobj) {
+
+ ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
/* Everybody except for the gang leader uses READ */
for (i = 0; i < p->gang_size; ++i) {
if (p->jobs[i] == leader)
continue;
- dma_resv_add_fence(e->tv.bo->base.resv,
+ dma_resv_add_fence(gobj->resv,
&p->jobs[i]->base.s_fence->finished,
DMA_RESV_USAGE_READ);
}
- /* The gang leader is remembered as writer */
- e->tv.num_shared = 0;
+ /* The gang leader is remembered as writer */
+ dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
}
seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
@@ -1337,7 +1342,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
cs->out.handle = seq;
leader->uf_sequence = seq;
- amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
for (i = 0; i < p->gang_size; ++i) {
amdgpu_job_free_resources(p->jobs[i]);
trace_amdgpu_cs_ioctl(p->jobs[i]);
@@ -1346,7 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
- ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1356,9 +1360,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
- unsigned i;
+ unsigned int i;
amdgpu_sync_free(&parser->sync);
+ drm_exec_fini(&parser->exec);
+
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
kfree(parser->post_deps[i].chain);
@@ -1379,11 +1385,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
if (parser->jobs[i])
amdgpu_job_free(parser->jobs[i]);
}
- if (parser->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
-
- amdgpu_bo_unref(&uf);
- }
+ amdgpu_bo_unref(&parser->uf_bo);
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -1444,7 +1446,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
error_backoff:
- ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
mutex_unlock(&parser.bo_list->bo_list_mutex);
error_fini:
@@ -1624,15 +1625,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
+
dma_fence_put(fence);
if (r < 0)
return r;
if (r == 0)
break;
-
- if (fence->error)
- return fence->error;
}
memset(wait, 0, sizeof(*wait));
@@ -1779,7 +1780,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
*map = mapping;
/* Double check that the BO is reserved by this CS */
- if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+ if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index fb3e3d56d427..39c33ad100cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -24,6 +24,7 @@
#define __AMDGPU_CS_H__
#include <linux/ww_mutex.h>
+#include <drm/drm_exec.h>
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
@@ -62,11 +63,9 @@ struct amdgpu_cs_parser {
struct amdgpu_job *gang_leader;
/* buffer objects */
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
struct amdgpu_bo_list *bo_list;
struct amdgpu_mn *mn;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head validated;
struct dma_fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved_vis_threshold;
@@ -74,7 +73,7 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis;
/* user fence */
- struct amdgpu_bo_list_entry uf_entry;
+ struct amdgpu_bo *uf_bo;
unsigned num_post_deps;
struct amdgpu_cs_post_dep *post_deps;
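The amdgpu_cs.h change above swaps the ww_acquire_ctx/validated-list pair for a struct drm_exec. A minimal sketch of the locking pattern the converted CS code now follows, assuming the helpers visible in the hunks above (amdgpu_vm_lock_pd(), drm_exec_prepare_obj()); this is illustrative, not the driver's exact code:

#include <drm/drm_exec.h>
#include "amdgpu.h"

/* Lock the VM page directory and one BO, reserving one fence slot for TTM
 * plus one per gang member, retrying from scratch on contention. */
static int example_lock_bos(struct amdgpu_vm *vm, struct amdgpu_bo *bo,
			    unsigned int gang_size)
{
	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 1 + gang_size);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;

		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1 + gang_size);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}

	/* ... validate, submit, dma_resv_add_fence() on each locked object ... */
error:
	drm_exec_fini(&exec);
	return r;
}

drm_exec_fini() also drops all the locks, which is why the old ttm_eu_backoff_reservation() and ttm_eu_fence_buffer_objects() calls disappear from the error and submit paths.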
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index c6d4d41c4393..720011019741 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -22,6 +22,8 @@
* * Author: [email protected]
*/
+#include <drm/drm_exec.h>
+
#include "amdgpu.h"
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
@@ -65,31 +67,25 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
uint64_t csa_addr, uint32_t size)
{
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
+ struct drm_exec exec;
int r;
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
- csa_tv.bo = &bo->tbo;
- csa_tv.num_shared = 1;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
- return r;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ goto error;
+ }
}
*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
if (!*bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
- DRM_ERROR("failed to create bo_va for static CSA\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto error;
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
@@ -99,10 +95,42 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
+ goto error;
}
- ttm_eu_backoff_reservation(&ticket, &list);
- return 0;
+error:
+ drm_exec_fini(&exec);
+ return r;
+}
+
+int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
+ uint64_t csa_addr)
+{
+ struct drm_exec exec;
+ int r;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ goto error;
+ }
+ }
+
+ r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
+ if (r) {
+ DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
+ goto error;
+ }
+
+ amdgpu_vm_bo_del(adev, bo_va);
+
+error:
+ drm_exec_fini(&exec);
+ return r;
}
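amdgpu_unmap_static_csa() above is the new counterpart to amdgpu_map_static_csa(). A hypothetical teardown path, shown only to illustrate the pairing (fpriv->csa_va, adev->virt.csa_obj and the hole mask are assumptions about the caller, not part of this patch):

/* Assumes csa_va was obtained from amdgpu_map_static_csa() at open time;
 * error handling elided. */
if (fpriv->csa_va) {
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

	amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
				fpriv->csa_va, csa_addr);
	fpriv->csa_va = NULL;
}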
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
index 524b4437a021..7dfc1f2012eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -34,6 +34,9 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
uint64_t csa_addr, uint32_t size);
+int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
+ uint64_t csa_addr);
void amdgpu_free_static_csa(struct amdgpu_bo **bo);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d2139ac12159..0dc9c655c4fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -222,8 +222,19 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
- scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
- num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+ if (!(adev)->xcp_mgr) {
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ } else {
+ struct amdgpu_fpriv *fpriv;
+
+ fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr);
+ r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
+ &num_scheds, &scheds);
+ if (r)
+ goto cleanup_entity;
+ }
/* disable load balance if the hw engine retains context among dependent jobs */
if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
@@ -255,7 +266,8 @@ error_free_entity:
return r;
}
-static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
+ struct amdgpu_ctx_entity *entity)
{
ktime_t res = ns_to_ktime(0);
int i;
@@ -268,6 +280,8 @@ static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
dma_fence_put(entity->fences[i]);
}
+ amdgpu_xcp_release_sched(adev, entity);
+
kfree(entity);
return res;
}
@@ -303,6 +317,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
struct drm_file *filp, struct amdgpu_ctx *ctx)
{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
u32 current_stable_pstate;
int r;
@@ -318,7 +333,7 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
- ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
+ ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
ctx->init_priority = priority;
ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
@@ -331,6 +346,7 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
else
ctx->stable_pstate = current_stable_pstate;
+ ctx->ctx_mgr = &(fpriv->ctx_mgr);
return 0;
}
@@ -399,7 +415,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
ktime_t spend;
- spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
+ spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
}
}
@@ -416,6 +432,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
int r;
+ struct drm_sched_entity *ctx_entity;
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
@@ -439,7 +456,14 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
return r;
}
- *entity = &ctx->entities[hw_ip][ring]->entity;
+ ctx_entity = &ctx->entities[hw_ip][ring]->entity;
+ r = drm_sched_entity_error(ctx_entity);
+ if (r) {
+ DRM_DEBUG("error entity %p\n", ctx_entity);
+ return r;
+ }
+
+ *entity = ctx_entity;
return 0;
}
@@ -570,12 +594,15 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
- if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+ if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ if (amdgpu_in_reset(adev))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;
+
if (adev->ras_enabled && con) {
/* Return the cached values in O(1),
* and schedule delayed work to cache
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 0fa0e56daf67..85376baaa92f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -47,7 +47,7 @@ struct amdgpu_ctx {
struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
unsigned reset_counter_query;
- uint32_t vram_lost_counter;
+ uint64_t generation;
spinlock_t ring_lock;
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
@@ -57,6 +57,7 @@ struct amdgpu_ctx {
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
uint32_t stable_pstate;
+ struct amdgpu_ctx_mgr *ctx_mgr;
};
struct amdgpu_ctx_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f60753f97ac5..a4faea4fa0b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -56,14 +56,14 @@
*
* Bit 62: Indicates a GRBM bank switch is needed
* Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
- * zero)
+ * zero)
* Bits 24..33: The SE or ME selector if needed
* Bits 34..43: The SH (or SA) or PIPE selector if needed
* Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
*
* Bit 23: Indicates that the PM power gating lock should be held
- * This is necessary to read registers that might be
- * unreliable during a power gating transistion.
+ * This is necessary to read registers that might be
+ * unreliable during a power gating transition.
*
* The lower bits are the BYTE offset of the register to read. This
* allows reading multiple registers in a single call and having
@@ -76,7 +76,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank, use_ring;
- unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
+ unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
pm_pg_lock = use_bank = use_ring = false;
instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
@@ -136,10 +136,10 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
}
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
+ sh_bank, instance_bank, 0);
} else if (use_ring) {
mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
+ amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0);
}
if (pm_pg_lock)
@@ -154,7 +154,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
}
if (r) {
result = r;
@@ -169,10 +169,10 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
end:
if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
} else if (use_ring) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
@@ -208,7 +208,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
{
struct amdgpu_debugfs_regs2_data *rd;
- rd = kzalloc(sizeof *rd, GFP_KERNEL);
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
rd->adev = file_inode(file)->i_private;
@@ -221,6 +221,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
{
struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+
mutex_destroy(&rd->lock);
kfree(file->private_data);
return 0;
@@ -262,14 +263,14 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
}
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
- rd->id.grbm.sh,
- rd->id.grbm.instance);
+ rd->id.grbm.sh,
+ rd->id.grbm.instance, rd->id.xcc_id);
}
if (rd->id.use_srbm) {
mutex_lock(&adev->srbm_mutex);
amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
- rd->id.srbm.queue, rd->id.srbm.vmid);
+ rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id);
}
if (rd->id.pg_lock)
@@ -282,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+ amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id);
}
if (r) {
result = r;
@@ -295,12 +296,12 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
}
end:
if (rd->id.use_grbm) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
}
if (rd->id.use_srbm) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id);
mutex_unlock(&adev->srbm_mutex);
}
@@ -319,18 +320,45 @@ end:
static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
{
struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ struct amdgpu_debugfs_regs2_iocdata v1_data;
int r;
+ mutex_lock(&rd->lock);
+
switch (cmd) {
+ case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2:
+ r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data,
+ sizeof(rd->id));
+ if (r)
+ r = -EINVAL;
+ goto done;
case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
- mutex_lock(&rd->lock);
- r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
- mutex_unlock(&rd->lock);
- return r ? -EINVAL : 0;
+ r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data,
+ sizeof(v1_data));
+ if (r) {
+ r = -EINVAL;
+ goto done;
+ }
+ goto v1_copy;
default:
- return -EINVAL;
- }
- return 0;
+ r = -EINVAL;
+ goto done;
+ }
+
+v1_copy:
+ rd->id.use_srbm = v1_data.use_srbm;
+ rd->id.use_grbm = v1_data.use_grbm;
+ rd->id.pg_lock = v1_data.pg_lock;
+ rd->id.grbm.se = v1_data.grbm.se;
+ rd->id.grbm.sh = v1_data.grbm.sh;
+ rd->id.grbm.instance = v1_data.grbm.instance;
+ rd->id.srbm.me = v1_data.srbm.me;
+ rd->id.srbm.pipe = v1_data.srbm.pipe;
+ rd->id.srbm.queue = v1_data.srbm.queue;
+ rd->id.xcc_id = 0;
+done:
+ mutex_unlock(&rd->lock);
+ return r;
}
static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
@@ -343,6 +371,137 @@ static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf
return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
}
+static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_gprwave_data *rd;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+ rd->adev = file_inode(file)->i_private;
+ file->private_data = rd;
+ mutex_init(&rd->lock);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_gprwave_data *rd = file->private_data;
+
+ mutex_destroy(&rd->lock);
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+ struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
+ struct amdgpu_device *adev = rd->adev;
+ ssize_t result = 0;
+ int r;
+ uint32_t *data, x;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return -ENOMEM;
+ }
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id);
+
+ if (!rd->id.gpr_or_wave) {
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x);
+ } else {
+ x = size >> 2;
+ if (rd->id.gpr.vpgr_or_sgpr) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data);
+ }
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ if (!x) {
+ result = -EINVAL;
+ goto done;
+ }
+
+ while (size && (*pos < x * 4)) {
+ uint32_t value;
+
+ value = data[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto done;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+done:
+ amdgpu_virt_disable_access_debugfs(adev);
+ kfree(data);
+ return result;
+}
+
+static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+ struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
+ int r = 0;
+
+ mutex_lock(&rd->lock);
+
+ switch (cmd) {
+ case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE:
+ if (copy_from_user(&rd->id,
+ (struct amdgpu_debugfs_gprwave_iocdata *)data,
+ sizeof(rd->id)))
+ r = -EFAULT;
+ goto done;
+ default:
+ r = -EINVAL;
+ goto done;
+ }
+
+done:
+ mutex_unlock(&rd->lock);
+ return r;
+}
+
+
+
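The new amdgpu_gprwave debugfs file is driven by a SET_STATE ioctl followed by ordinary reads. A rough userspace sketch of that flow, illustrative only: struct amdgpu_debugfs_gprwave_iocdata and AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE are defined in the driver tree rather than in UAPI headers, so a tool is assumed to carry its own copy (as umr does for the regs2 interface):

#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Select an SE/SA/CU/SIMD/wave via the ioctl, then read the wave status
 * words; fields and semantics follow the read path implemented above. */
static ssize_t read_wave_status(int fd,
				struct amdgpu_debugfs_gprwave_iocdata *id,
				uint32_t *buf, size_t words)
{
	id->gpr_or_wave = 0;	/* 0 = wave status, 1 = VGPRs/SGPRs */
	if (ioctl(fd, AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE, id) < 0)
		return -1;
	return pread(fd, buf, words * 4, 0);
}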
/**
* amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
@@ -863,7 +1022,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
* The offset being sought changes which wave that the status data
* will be returned for. The bits are used as follows:
*
- * Bits 0..6: Byte offset into data
+ * Bits 0..6: Byte offset into data
* Bits 7..14: SE selector
* Bits 15..22: SH/SA selector
* Bits 23..30: CU/{WGP+SIMD} selector
@@ -907,13 +1066,13 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
x = 0;
if (adev->gfx.funcs->read_wave_data)
- adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+ adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x);
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -1001,17 +1160,17 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
if (bank == 0) {
if (adev->gfx.funcs->read_wave_vgprs)
- adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
+ adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data);
} else {
if (adev->gfx.funcs->read_wave_sgprs)
- adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
+ adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data);
}
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -1339,6 +1498,15 @@ static const struct file_operations amdgpu_debugfs_regs2_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gprwave_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl,
+ .read = amdgpu_debugfs_gprwave_read,
+ .open = amdgpu_debugfs_gprwave_open,
+ .release = amdgpu_debugfs_gprwave_release,
+ .llseek = default_llseek
+};
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -1416,6 +1584,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs2_fops,
+ &amdgpu_debugfs_gprwave_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
@@ -1429,9 +1598,10 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gfxoff_residency_fops,
};
-static const char *debugfs_regs_names[] = {
+static const char * const debugfs_regs_names[] = {
"amdgpu_regs",
"amdgpu_regs2",
+ "amdgpu_gprwave",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
@@ -1447,7 +1617,7 @@ static const char *debugfs_regs_names[] = {
/**
* amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
- * register access.
+ * register access.
*
* @adev: The device to attach the debugfs entries to
*/
@@ -1459,7 +1629,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
+ S_IFREG | 0444, root,
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
@@ -1470,7 +1640,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
struct drm_device *dev = adev_to_drm(adev);
int r = 0, i;
@@ -1494,12 +1664,12 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
kthread_park(ring->sched.thread);
}
- seq_printf(m, "run ib test:\n");
+ seq_puts(m, "run ib test:\n");
r = amdgpu_ib_ring_tests(adev);
if (r)
seq_printf(m, "ib ring tests failed (%d).\n", r);
else
- seq_printf(m, "ib ring tests passed.\n");
+ seq_puts(m, "ib ring tests passed.\n");
/* go on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -1581,7 +1751,7 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
@@ -1978,7 +2148,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_ring_init(adev, ring);
}
- for ( i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (!amdgpu_vcnfw_log)
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5c7d40873ee2..e77f048c99d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -159,76 +159,11 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
return sysfs_emit(buf, "%llu\n", cnt);
}
-static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
+static DEVICE_ATTR(pcie_replay_count, 0444,
amdgpu_device_get_pcie_replay_count, NULL);
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-/**
- * DOC: product_name
- *
- * The amdgpu driver provides a sysfs API for reporting the product name
- * for the device
- * The file product_name is used for this and returns the product name
- * as returned from the FRU.
- * NOTE: This is only available for certain server cards
- */
-
-static ssize_t amdgpu_device_get_product_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
-
- return sysfs_emit(buf, "%s\n", adev->product_name);
-}
-
-static DEVICE_ATTR(product_name, S_IRUGO,
- amdgpu_device_get_product_name, NULL);
-
-/**
- * DOC: product_number
- *
- * The amdgpu driver provides a sysfs API for reporting the part number
- * for the device
- * The file product_number is used for this and returns the part number
- * as returned from the FRU.
- * NOTE: This is only available for certain server cards
- */
-
-static ssize_t amdgpu_device_get_product_number(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
-
- return sysfs_emit(buf, "%s\n", adev->product_number);
-}
-
-static DEVICE_ATTR(product_number, S_IRUGO,
- amdgpu_device_get_product_number, NULL);
-
-/**
- * DOC: serial_number
- *
- * The amdgpu driver provides a sysfs API for reporting the serial number
- * for the device
- * The file serial_number is used for this and returns the serial number
- * as returned from the FRU.
- * NOTE: This is only available for certain server cards
- */
-
-static ssize_t amdgpu_device_get_serial_number(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(ddev);
-
- return sysfs_emit(buf, "%s\n", adev->serial);
-}
-
-static DEVICE_ATTR(serial_number, S_IRUGO,
- amdgpu_device_get_serial_number, NULL);
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
@@ -370,10 +305,16 @@ size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
if (write) {
memcpy_toio(addr, buf, count);
+ /* Make sure HDP write cache flush happens without any reordering
+ * after the system memory contents are sent over PCIe to the device
+ */
mb();
amdgpu_device_flush_hdp(adev, NULL);
} else {
amdgpu_device_invalidate_hdp(adev, NULL);
+ /* Make sure HDP read cache is invalidated before issuing a read
+ * to the PCIe device
+ */
mb();
memcpy_fromio(buf, addr, count);
}
@@ -481,8 +422,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
/*
* MMIO register read with bytes helper functions
* @offset:bytes offset from MMIO start
- *
-*/
+ */
/**
* amdgpu_mm_rreg8 - read a memory mapped IO register
@@ -506,8 +446,8 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
* MMIO register write with bytes helper functions
* @offset:bytes offset from MMIO start
* @value: the value want to be written to the register
- *
-*/
+ */
+
/**
* amdgpu_mm_wreg8 - read a memory mapped IO register
*
@@ -571,7 +511,8 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
* this function is invoked only for the debugfs register access
*/
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
- uint32_t reg, uint32_t v)
+ uint32_t reg, uint32_t v,
+ uint32_t xcc_id)
{
if (amdgpu_device_skip_hw_access(adev))
return;
@@ -580,7 +521,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
- return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
+ return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
} else if ((reg * 4) >= adev->rmmio_size) {
adev->pcie_wreg(adev, reg * 4, v);
} else {
@@ -589,94 +530,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
}
/**
- * amdgpu_mm_rdoorbell - read a doorbell dword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- *
- * Returns the value in the doorbell aperture at the
- * requested doorbell index (CIK).
- */
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
-{
- if (amdgpu_device_skip_hw_access(adev))
- return 0;
-
- if (index < adev->doorbell.num_kernel_doorbells) {
- return readl(adev->doorbell.ptr + index);
- } else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
- return 0;
- }
-}
-
-/**
- * amdgpu_mm_wdoorbell - write a doorbell dword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- * @v: value to write
- *
- * Writes @v to the doorbell aperture at the
- * requested doorbell index (CIK).
- */
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
-{
- if (amdgpu_device_skip_hw_access(adev))
- return;
-
- if (index < adev->doorbell.num_kernel_doorbells) {
- writel(v, adev->doorbell.ptr + index);
- } else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
- }
-}
-
-/**
- * amdgpu_mm_rdoorbell64 - read a doorbell Qword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- *
- * Returns the value in the doorbell aperture at the
- * requested doorbell index (VEGA10+).
- */
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
-{
- if (amdgpu_device_skip_hw_access(adev))
- return 0;
-
- if (index < adev->doorbell.num_kernel_doorbells) {
- return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
- } else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
- return 0;
- }
-}
-
-/**
- * amdgpu_mm_wdoorbell64 - write a doorbell Qword
- *
- * @adev: amdgpu_device pointer
- * @index: doorbell index
- * @v: value to write
- *
- * Writes @v to the doorbell aperture at the
- * requested doorbell index (VEGA10+).
- */
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
-{
- if (amdgpu_device_skip_hw_access(adev))
- return;
-
- if (index < adev->doorbell.num_kernel_doorbells) {
- atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
- } else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
- }
-}
-
-/**
* amdgpu_device_indirect_rreg - read an indirect register
*
* @adev: amdgpu_device pointer
@@ -707,6 +560,48 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
return r;
}
+u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr)
+{
+ unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
+ u32 r;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_index_hi_offset;
+ void __iomem *pcie_data_offset;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ if (adev->nbio.funcs->get_pcie_index_hi_offset)
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+ else
+ pcie_index_hi = 0;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+ if (pcie_index_hi != 0)
+ pcie_index_hi_offset = (void __iomem *)adev->rmmio +
+ pcie_index_hi * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ r = readl(pcie_data_offset);
+
+ /* clear the high bits */
+ if (pcie_index_hi != 0) {
+ writel(0, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
/**
* amdgpu_device_indirect_rreg64 - read a 64bits indirect register
*
@@ -747,8 +642,6 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
* amdgpu_device_indirect_wreg - write an indirect register address
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register offset
* @reg_data: indirect register data
*
@@ -774,12 +667,50 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr, u32 reg_data)
+{
+ unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_index_hi_offset;
+ void __iomem *pcie_data_offset;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ if (adev->nbio.funcs->get_pcie_index_hi_offset)
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+ else
+ pcie_index_hi = 0;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+ if (pcie_index_hi != 0)
+ pcie_index_hi_offset = (void __iomem *)adev->rmmio +
+ pcie_index_hi * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ writel(reg_data, pcie_data_offset);
+ readl(pcie_data_offset);
+
+ /* clear the high bits */
+ if (pcie_index_hi != 0) {
+ writel(0, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
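The _ext accessors above extend the indirect register window beyond 32-bit addresses: the low dword is written to the usual pcie_index register and bits 32..39 go through the optional pcie_index_hi register, with a readl() after each writel() to post the write before touching pcie_data. A minimal caller sketch (the offset is a made-up example, shown only to illustrate the hi/lo split):

/* Hypothetical >32-bit indirect offset. */
u64 reg_addr = (0x12ULL << 32) | 0x4000;
u32 val;

val = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
amdgpu_device_indirect_wreg_ext(adev, reg_addr, val | 0x1);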
/**
* amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
*
* @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
* @reg_addr: indirect register offset
* @reg_data: indirect register data
*
@@ -840,6 +771,13 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
return 0;
}
+static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
+{
+ DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ BUG();
+ return 0;
+}
+
/**
* amdgpu_invalid_wreg - dummy reg write function
*
@@ -857,6 +795,13 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
BUG();
}
+static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
+{
+ DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
+ reg, v);
+ BUG();
+}
+
/**
* amdgpu_invalid_rreg64 - dummy 64 bit reg read function
*
@@ -942,7 +887,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
amdgpu_asic_pre_asic_init(adev);
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
+ adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
return amdgpu_atomfirmware_asic_init(adev, true);
else
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -985,7 +931,7 @@ static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
* @registers: pointer to the register array
* @array_size: size of the register array
*
- * Programs an array or registers with and and or masks.
+ * Programs an array of registers with and/or masks.
* This is a helper for setting golden registers.
*/
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
@@ -998,7 +944,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
if (array_size % 3)
return;
- for (i = 0; i < array_size; i +=3) {
+ for (i = 0; i < array_size; i += 3) {
reg = registers[i + 0];
and_mask = registers[i + 1];
or_mask = registers[i + 2];
@@ -1043,82 +989,6 @@ int amdgpu_device_pci_reset(struct amdgpu_device *adev)
}
/*
- * GPU doorbell aperture helpers function.
- */
-/**
- * amdgpu_device_doorbell_init - Init doorbell driver information.
- *
- * @adev: amdgpu_device pointer
- *
- * Init doorbell driver information (CIK)
- * Returns 0 on success, error on failure.
- */
-static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
-{
-
- /* No doorbell on SI hardware generation */
- if (adev->asic_type < CHIP_BONAIRE) {
- adev->doorbell.base = 0;
- adev->doorbell.size = 0;
- adev->doorbell.num_kernel_doorbells = 0;
- adev->doorbell.ptr = NULL;
- return 0;
- }
-
- if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
- return -EINVAL;
-
- amdgpu_asic_init_doorbell_index(adev);
-
- /* doorbell bar mapping */
- adev->doorbell.base = pci_resource_start(adev->pdev, 2);
- adev->doorbell.size = pci_resource_len(adev->pdev, 2);
-
- if (adev->enable_mes) {
- adev->doorbell.num_kernel_doorbells =
- adev->doorbell.size / sizeof(u32);
- } else {
- adev->doorbell.num_kernel_doorbells =
- min_t(u32, adev->doorbell.size / sizeof(u32),
- adev->doorbell_index.max_assignment+1);
- if (adev->doorbell.num_kernel_doorbells == 0)
- return -EINVAL;
-
- /* For Vega, reserve and map two pages on doorbell BAR since SDMA
- * paging queue doorbell use the second page. The
- * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
- * doorbells are in the first page. So with paging queue enabled,
- * the max num_kernel_doorbells should + 1 page (0x400 in dword)
- */
- if (adev->asic_type >= CHIP_VEGA10)
- adev->doorbell.num_kernel_doorbells += 0x400;
- }
-
- adev->doorbell.ptr = ioremap(adev->doorbell.base,
- adev->doorbell.num_kernel_doorbells *
- sizeof(u32));
- if (adev->doorbell.ptr == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down doorbell driver information (CIK)
- */
-static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
-{
- iounmap(adev->doorbell.ptr);
- adev->doorbell.ptr = NULL;
-}
-
-
-
-/*
* amdgpu_device_wb_*()
* Writeback is the method by which the GPU updates special pages in memory
* with the status of certain GPU events (fences, ring pointers,etc.).
@@ -1227,10 +1097,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
- unsigned i;
+ unsigned int i;
u16 cmd;
int r;
+ if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ return 0;
+
/* Bypass for VF */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1265,7 +1138,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
cmd & ~PCI_COMMAND_MEMORY);
/* Free the VRAM and doorbell BAR, we most likely need to move both. */
- amdgpu_device_doorbell_fini(adev);
+ amdgpu_doorbell_fini(adev);
if (adev->asic_type >= CHIP_BONAIRE)
pci_release_resource(adev->pdev, 2);
@@ -1282,7 +1155,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
*/
- r = amdgpu_device_doorbell_init(adev);
+ r = amdgpu_doorbell_init(adev);
if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
return -ENODEV;
@@ -1291,6 +1164,14 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
}
+static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
+{
+ if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
+ return false;
+
+ return true;
+}
+
/*
* GPU helpers function.
*/
@@ -1310,6 +1191,9 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return false;
+ if (!amdgpu_device_read_bios(adev))
+ return false;
+
if (amdgpu_passthrough(adev)) {
/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
* some old smc fw still need driver do vPost otherwise gpu hang, while
@@ -1319,6 +1203,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_FIJI) {
int err;
uint32_t fw_ver;
+
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
/* force vPost if error occured */
if (err)
@@ -1352,6 +1237,51 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}
+/*
+ * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+ * Disable S/G on such systems until we have a proper fix.
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+ */
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+{
+ switch (amdgpu_sg_display) {
+ case -1:
+ break;
+ case 0:
+ return false;
+ case 1:
+ return true;
+ default:
+ return false;
+ }
+ if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+ (adev->gmc.real_vram_size / 1024) >= 64000000) {
+ DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+ return false;
+ }
+ return true;
+}
+
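The size check in amdgpu_sg_display_supported() works in KiB; a quick unit check of the threshold, assuming the 4 KiB pages the comment values imply:

/* totalram_pages() << (PAGE_SHIFT - 10) : pages -> KiB (x4 with 4 KiB pages)
 * adev->gmc.real_vram_size / 1024       : bytes -> KiB
 * 64000000 KiB == 64,000,000 * 1024 B   : ~65.5 GB (~61 GiB), i.e. the
 *                                         ">= 64GB" cutoff from the comment
 */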
+/*
+ * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
+ * speed switching. Until we have confirmation from Intel that a specific host
+ * supports it, it's safer that we keep it disabled for all.
+ *
+ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
+ */
+bool amdgpu_device_pcie_dynamic_switching_supported(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (c->x86_vendor == X86_VENDOR_INTEL)
+ return false;
+#endif
+ return true;
+}
+
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1402,6 +1332,7 @@ static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
bool state)
{
struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
+
amdgpu_asic_set_vga_state(adev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -1424,7 +1355,8 @@ static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
- * page table and the remaining bits are in the page directory */
+ * page table and the remaining bits are in the page directory
+ */
if (amdgpu_vm_block_size == -1)
return;
@@ -1547,7 +1479,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
- } else if (!is_power_of_2(amdgpu_sched_jobs)){
+ } else if (!is_power_of_2(amdgpu_sched_jobs)) {
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
@@ -1656,7 +1588,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- /*
+ /*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
@@ -2194,7 +2126,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_ERROR("disabled ip block: %d <%s>\n",
+ DRM_WARN("disabled ip block: %d <%s>\n",
i, adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else {
@@ -2220,14 +2152,16 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return r;
/* Read BIOS */
- if (!amdgpu_get_bios(adev))
- return -EINVAL;
+ if (amdgpu_device_read_bios(adev)) {
+ if (!amdgpu_get_bios(adev))
+ return -EINVAL;
- r = amdgpu_atombios_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
- return r;
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ return r;
+ }
}
/*get pf2vf msg info at it's earliest time*/
@@ -2376,6 +2310,8 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
}
}
+ amdgpu_xcp_update_partition_sched_list(adev);
+
return 0;
}
@@ -2442,7 +2378,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = true;
/* right after GMC hw init, we create CSA */
- if (amdgpu_mcbp) {
+ if (adev->gfx.mcbp) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -2533,8 +2469,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
goto init_failed;
/* Don't init kfd if whole hive need to be reset during init */
- if (!adev->gmc.xgmi.pending_reset)
+ if (!adev->gmc.xgmi.pending_reset) {
+ kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
+ }
amdgpu_fru_get_product_info(adev);
@@ -2759,8 +2697,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
- if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
- adev->asic_type == CHIP_ALDEBARAN ))
+ if (amdgpu_passthrough(adev) &&
+ ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
+ adev->asic_type == CHIP_ALDEBARAN))
amdgpu_dpm_handle_passthrough_sbr(adev, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -3089,7 +3028,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
- if(!amdgpu_sriov_vf(adev)){
+ if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
@@ -3294,7 +3233,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
*
* Main resume function for hardware IPs. The hardware IPs
* are split into two resume functions because they are
- * are also used in in recovering from a GPU reset and some additional
+ * also used in recovering from a GPU reset and some additional
* steps need to be take between them. In this case (S3/S4) they are
* run sequentially.
* Returns 0 on success, negative error code on failure.
@@ -3303,12 +3242,6 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume_iommu(adev);
- if (r)
- return r;
- }
-
r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
@@ -3396,8 +3329,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
- "but isn't supported by ASIC, ignoring\n");
+ DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -3553,13 +3485,28 @@ static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
}
static const struct attribute *amdgpu_dev_attributes[] = {
- &dev_attr_product_name.attr,
- &dev_attr_product_number.attr,
- &dev_attr_serial_number.attr,
&dev_attr_pcie_replay_count.attr,
NULL
};
+static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
+{
+ if (amdgpu_mcbp == 1)
+ adev->gfx.mcbp = true;
+ else if (amdgpu_mcbp == 0)
+ adev->gfx.mcbp = false;
+ else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+ adev->gfx.num_gfx_rings)
+ adev->gfx.mcbp = true;
+
+ if (amdgpu_sriov_vf(adev))
+ adev->gfx.mcbp = true;
+
+ if (adev->gfx.mcbp)
+ DRM_INFO("MCBP is enabled\n");
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -3608,6 +3555,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->smc_wreg = &amdgpu_invalid_wreg;
adev->pcie_rreg = &amdgpu_invalid_rreg;
adev->pcie_wreg = &amdgpu_invalid_wreg;
+ adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
+ adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
adev->pciep_rreg = &amdgpu_invalid_rreg;
adev->pciep_wreg = &amdgpu_invalid_wreg;
adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
@@ -3626,13 +3575,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
- * can recall function without having locking issues */
+ * can recall function without having locking issues
+ */
mutex_init(&adev->firmware.mutex);
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
mutex_init(&adev->gfx.pipe_reserve_mutex);
mutex_init(&adev->gfx.gfx_off_mutex);
+ mutex_init(&adev->gfx.partition_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
@@ -3702,16 +3653,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
- if (adev->rmmio == NULL) {
+ if (!adev->rmmio)
return -ENOMEM;
- }
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
-
- amdgpu_device_get_pcie_info(adev);
- if (amdgpu_mcbp)
- DRM_INFO("MCBP is enabled\n");
+ DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
+ DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
@@ -3725,6 +3671,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
+ amdgpu_device_get_pcie_info(adev);
+
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
@@ -3736,6 +3684,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_device_set_mcbp(adev);
+
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
@@ -3753,26 +3703,29 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
/* enable PCIE atomic ops */
- if (amdgpu_sriov_vf(adev))
- adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
- adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
- (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ if (amdgpu_sriov_vf(adev)) {
+ if (adev->virt.fw_reserve.p_pf2vf)
+ adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+ adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
+ (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 /* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
 * internal path natively supports atomics, so set have_atomics_support to true.
 */
- else if ((adev->flags & AMD_IS_APU) &&
- (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+ } else if ((adev->flags & AMD_IS_APU) &&
+ (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
adev->have_atomics_support = true;
- else
+ } else {
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ }
+
if (!adev->have_atomics_support)
dev_info(adev->dev, "PCIE atomic ops is not supported\n");
/* doorbell bar mapping and doorbell index init*/
- amdgpu_device_doorbell_init(adev);
+ amdgpu_doorbell_init(adev);
if (amdgpu_emu_mode == 1) {
/* post the asic on emulation mode */
@@ -3783,7 +3736,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_reset_init(adev);
/* detect if we are with an SRIOV vbios */
- amdgpu_device_detect_sriov_bios(adev);
+ if (adev->bios)
+ amdgpu_device_detect_sriov_bios(adev);
/* check if we need to reset the asic
* E.g., driver was not cleanly unloaded previously, etc.
@@ -3835,25 +3789,27 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
- if (adev->is_atom_fw) {
- /* Initialize clocks */
- r = amdgpu_atomfirmware_get_clock_info(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
- goto failed;
- }
- } else {
- /* Initialize clocks */
- r = amdgpu_atombios_get_clock_info(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
- goto failed;
+ if (adev->bios) {
+ if (adev->is_atom_fw) {
+ /* Initialize clocks */
+ r = amdgpu_atomfirmware_get_clock_info(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
+ }
+ } else {
+ /* Initialize clocks */
+ r = amdgpu_atombios_get_clock_info(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
+ }
+ /* init i2c buses */
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_atombios_i2c_init(adev);
}
- /* init i2c buses */
- if (!amdgpu_device_has_dc_support(adev))
- amdgpu_atombios_i2c_init(adev);
}
fence_driver_init:
@@ -3896,6 +3852,11 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
r = amdgpu_pm_sysfs_init(adev);
if (r)
DRM_ERROR("registering pm sysfs failed (%d).\n", r);
@@ -3907,14 +3868,6 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = amdgpu_psp_sysfs_init(adev);
- if (r) {
- adev->psp_sysfs_en = false;
- if (!amdgpu_sriov_vf(adev))
- DRM_ERROR("Creating psp sysfs failed\n");
- } else
- adev->psp_sysfs_en = true;
-
/*
* Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
* Otherwise the mgpu fan boost feature will be skipped due to the
@@ -3947,6 +3900,8 @@ fence_driver_init:
if (r)
dev_err(adev->dev, "Could not create amdgpu device attr\n");
+ amdgpu_fru_sysfs_init(adev);
+
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
if (r)
@@ -3958,7 +3913,8 @@ fence_driver_init:
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
- * ignore it */
+ * ignore it
+ */
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
@@ -4010,7 +3966,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
/* Unmap all mapped bars - Doorbell, registers and VRAM */
- amdgpu_device_doorbell_fini(adev);
+ amdgpu_doorbell_fini(adev);
iounmap(adev->rmmio);
adev->rmmio = NULL;
@@ -4019,7 +3975,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
adev->mman.aper_base_kaddr = NULL;
/* Memory manager related */
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
arch_phys_wc_del(adev->gmc.vram_mtrr);
arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
@@ -4041,7 +3997,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
- * */
+ */
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_request_full_gpu(adev, false);
amdgpu_virt_fini_data_exchange(adev);
@@ -4049,7 +4005,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
- if (adev->mode_info.mode_config_initialized){
+ if (adev->mode_info.mode_config_initialized) {
if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
drm_helper_force_disable_all(adev_to_drm(adev));
else
@@ -4064,9 +4020,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
- if (adev->psp_sysfs_en)
- amdgpu_psp_sysfs_fini(adev);
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ amdgpu_fru_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4124,7 +4079,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_device_doorbell_fini(adev);
+ amdgpu_doorbell_fini(adev);
drm_dev_exit(idx);
}
@@ -4205,6 +4160,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
@@ -4583,6 +4539,10 @@ retry:
r = amdgpu_virt_reset_gpu(adev);
if (r)
return r;
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+
+ /* some software cleanup the VF needs to do before recovery */
+ amdgpu_virt_post_reset(adev);
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
@@ -4609,7 +4569,6 @@ retry:
amdgpu_put_xgmi_hive(hive);
if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
amdgpu_amdkfd_post_reset(adev);
@@ -4714,42 +4673,42 @@ disabled:
int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
{
- u32 i;
- int ret = 0;
+ u32 i;
+ int ret = 0;
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
- dev_info(adev->dev, "GPU mode1 reset\n");
+ dev_info(adev->dev, "GPU mode1 reset\n");
- /* disable BM */
- pci_clear_master(adev->pdev);
+ /* disable BM */
+ pci_clear_master(adev->pdev);
- amdgpu_device_cache_pci_state(adev->pdev);
+ amdgpu_device_cache_pci_state(adev->pdev);
- if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
- dev_info(adev->dev, "GPU smu mode1 reset\n");
- ret = amdgpu_dpm_mode1_reset(adev);
- } else {
- dev_info(adev->dev, "GPU psp mode1 reset\n");
- ret = psp_gpu_reset(adev);
- }
+ if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ dev_info(adev->dev, "GPU smu mode1 reset\n");
+ ret = amdgpu_dpm_mode1_reset(adev);
+ } else {
+ dev_info(adev->dev, "GPU psp mode1 reset\n");
+ ret = psp_gpu_reset(adev);
+ }
- if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
- amdgpu_device_load_pci_state(adev->pdev);
+ amdgpu_device_load_pci_state(adev->pdev);
- /* wait for asic to come out of reset */
- for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio.funcs->get_memsize(adev);
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
- if (memsize != 0xffffffff)
- break;
- udelay(1);
- }
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
- return ret;
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ return ret;
}
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
@@ -4777,8 +4736,9 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
- /*clear job fence from fence drv to avoid force_completion
- *leave NULL and vm flush fence in fence drv */
+ /* Clear job fence from fence drv to avoid force_completion
+ * leave NULL and vm flush fence in fence drv
+ */
amdgpu_fence_driver_clear_job_fences(ring);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
@@ -4792,7 +4752,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
- if (r == -ENOSYS)
+ if (r == -EOPNOTSUPP)
r = 0;
else
return r;
@@ -4910,7 +4870,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
- if (r == -ENOSYS)
+ if (r == -EOPNOTSUPP)
r = 0;
else
return r;
@@ -4988,9 +4948,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
dev_warn(tmp_adev->dev, "asic atom init failed!");
} else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_amdkfd_resume_iommu(tmp_adev);
- if (r)
- goto out;
r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r)
@@ -5399,9 +5356,8 @@ skip_hw_reset:
if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
amdgpu_mes_self_test(tmp_adev);
- if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- }
if (tmp_adev->asic_reset_res)
r = tmp_adev->asic_reset_res;
@@ -5478,7 +5434,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
/* covers APUs as well */
- if (pci_is_root_bus(adev->pdev->bus)) {
+ if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
if (adev->pm.pcie_gen_mask == 0)
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
if (adev->pm.pcie_mlw_mask == 0)
@@ -5959,6 +5915,7 @@ void amdgpu_device_halt(struct amdgpu_device *adev)
struct pci_dev *pdev = adev->pdev;
struct drm_device *ddev = adev_to_drm(adev);
+ amdgpu_xcp_dev_unplug(adev);
drm_dev_unplug(ddev);
amdgpu_irq_disable_all(adev);
@@ -6079,3 +6036,31 @@ bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
return true;
}
}
+
+uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
+ uint32_t inst, uint32_t reg_addr, char reg_name[],
+ uint32_t expected_value, uint32_t mask)
+{
+ uint32_t ret = 0;
+ uint32_t old_ = 0;
+ uint32_t tmp_ = RREG32(reg_addr);
+ uint32_t loop = adev->usec_timeout;
+
+ while ((tmp_ & (mask)) != (expected_value)) {
+ if (old_ != tmp_) {
+ loop = adev->usec_timeout;
+ old_ = tmp_;
+ } else
+ udelay(1);
+ tmp_ = RREG32(reg_addr);
+ loop--;
+ if (!loop) {
+ DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ return ret;
+}
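
The amdgpu_device_wait_on_rreg() helper added above polls a register until (value & mask) matches the expected value, restarting the usec_timeout countdown whenever the read value changes so a slowly-progressing register is not treated as hung. A minimal user-space sketch of the same polling pattern, not part of the patch; read_reg() and the timeout value are stand-ins for the driver's RREG32() and adev->usec_timeout:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for RREG32(); in the driver this is an MMIO read. */
static uint32_t fake_reg;
static uint32_t read_reg(void)
{
	/* Pretend the hardware flips the ready bit after a few polls. */
	static int polls;
	if (++polls > 3)
		fake_reg |= 0x1;
	return fake_reg;
}

/* Poll until (reg & mask) == expected; restart the countdown whenever
 * the value changes, so progress keeps the wait alive. Returns 0 on
 * success, -1 on timeout. */
static int wait_on_reg(uint32_t expected, uint32_t mask, uint32_t usec_timeout)
{
	uint32_t old = 0, tmp = read_reg();
	uint32_t loop = usec_timeout;

	while ((tmp & mask) != expected) {
		if (old != tmp) {
			loop = usec_timeout;	/* value moved: reset the timeout */
			old = tmp;
		}
		/* the driver would udelay(1) here */
		tmp = read_reg();
		if (!--loop)
			return -1;
	}
	return 0;
}

int main(void)
{
	printf("wait result: %d\n", wait_on_reg(0x1, 0x1, 100));
	return 0;
}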
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 0ecce0b92b82..74ffe6581c85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -30,6 +30,7 @@
#include "soc15.h"
#include "gfx_v9_0.h"
+#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
@@ -64,6 +65,7 @@
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
+#include "ih_v6_1.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
@@ -76,12 +78,15 @@
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
+#include "vcn_v4_0_3.h"
+#include "jpeg_v4_0_3.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
+#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
@@ -200,14 +205,44 @@ static int hw_id_map[MAX_HWIP] = {
[PCIE_HWIP] = PCIE_HWID,
};
-static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ u64 tmr_offset, tmr_size, pos;
+ void *discv_regn;
+ int ret;
+
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+
+ pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ int ret = 0;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->mman.discovery_tmr_size, false);
- return 0;
+ if (vram_size) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->mman.discovery_tmr_size, false);
+ } else {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ }
+
+ return ret;
}
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
@@ -280,6 +315,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
case 0xCF:
case 0xDF:
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
break;
default:
break;
@@ -301,33 +337,30 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!adev->mman.discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
- if (r) {
- dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) {
- /* ignore the discovery binary from vram if discovery=2 in kernel module parameter */
- if (amdgpu_discovery == 2)
- dev_info(adev->dev,"force read ip discovery binary from file");
- else
- dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
-
- /* retry read ip discovery binary from file */
+ /* Read from file if it is the preferred option */
+ if (amdgpu_discovery == 2) {
+ dev_info(adev->dev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
+
if (r) {
dev_err(adev->dev, "failed to read ip discovery binary from file\n");
r = -EINVAL;
goto out;
}
- /* check the ip discovery binary signature */
- if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
- dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
- r = -EINVAL;
+
+ } else {
+ r = amdgpu_discovery_read_binary_from_mem(
+ adev, adev->mman.discovery_bin);
+ if (r)
goto out;
- }
+ }
+
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_err(adev->dev,
+ "get invalid ip discovery binary signature\n");
+ r = -EINVAL;
+ goto out;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
@@ -471,11 +504,11 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev)
adev->mman.discovery_bin = NULL;
}
-static int amdgpu_discovery_validate_ip(const struct ip *ip)
+static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
- if (ip->number_instance >= HWIP_MAX_INSTANCE) {
- DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
- ip->number_instance);
+ if (ip->instance_number >= HWIP_MAX_INSTANCE) {
+ DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
+ ip->instance_number);
return -EINVAL;
}
if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
@@ -493,7 +526,7 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct ip *ip;
+ struct ip_v4 *ip;
uint16_t die_offset, ip_offset, num_dies, num_ips;
int i, j;
@@ -510,29 +543,41 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip))
goto next_ip;
- if (le16_to_cpu(ip->harvest) == 1) {
+ if (le16_to_cpu(ip->variant) == 1) {
switch (le16_to_cpu(ip->hw_id)) {
case VCN_HWID:
(*vcn_harvest_count)++;
- if (ip->number_instance == 0)
+ if (ip->instance_number == 0) {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
- else
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ } else {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ }
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
default:
break;
- }
- }
+ }
+ }
next_ip:
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
}
@@ -564,10 +609,15 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
(*vcn_harvest_count)++;
- if (harvest_info->list[i].number_instance == 0)
- adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
- else
- adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+ adev->jpeg.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+
+ adev->vcn.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ adev->jpeg.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -577,6 +627,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
1 << (le16_to_cpu(harvest_info->list[i].number_instance));
(*umc_harvest_count)++;
break;
+ case GC_HWID:
+ adev->gfx.xcc_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case SDMA0_HWID:
+ adev->sdma.sdma_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
default:
break;
}
@@ -836,9 +894,40 @@ static void ip_disc_release(struct kobject *kobj)
kfree(ip_top);
}
+static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+ uint16_t hw_id, uint8_t inst)
+{
+ uint8_t harvest = 0;
+
+ /* Until a uniform way is figured out, derive the mask based on hw_id */
+ switch (hw_id) {
+ case VCN_HWID:
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ break;
+ case DMU_HWID:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+ harvest = 0x1;
+ break;
+ case UMC_HWID:
+ /* TODO: This needs a different parsing scheme; ignore for now. */
+ break;
+ case GC_HWID:
+ harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
+ break;
+ case SDMA0_HWID:
+ harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
+ break;
+ default:
+ break;
+ }
+
+ return harvest;
+}
+
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_die_entry *ip_die_entry,
- const size_t _ip_offset, const int num_ips)
+ const size_t _ip_offset, const int num_ips,
+ bool reg_base_64)
{
int ii, jj, kk, res;
@@ -852,10 +941,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
size_t ip_offset = _ip_offset;
for (jj = 0; jj < num_ips; jj++) {
- struct ip *ip;
+ struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
- ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip) ||
le16_to_cpu(ip->hw_id) != ii)
goto next_ip;
@@ -903,22 +992,35 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
return -ENOMEM;
}
ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
- ip_hw_instance->num_instance = ip->number_instance;
+ ip_hw_instance->num_instance = ip->instance_number;
ip_hw_instance->major = ip->major;
ip_hw_instance->minor = ip->minor;
ip_hw_instance->revision = ip->revision;
- ip_hw_instance->harvest = ip->harvest;
+ ip_hw_instance->harvest =
+ amdgpu_discovery_get_harvest_info(
+ adev, ip_hw_instance->hw_id,
+ ip_hw_instance->num_instance);
ip_hw_instance->num_base_addresses = ip->num_base_address;
- for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
- ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
+ if (reg_base_64)
+ ip_hw_instance->base_addr[kk] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
+ else
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+ }
kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
res = kobject_add(&ip_hw_instance->kobj, NULL,
"%d", ip_hw_instance->num_instance);
next_ip:
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ if (reg_base_64)
+ ip_offset += struct_size(ip, base_address_64,
+ ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
}
}
@@ -972,7 +1074,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
return res;
}
- amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
+ amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
}
return 0;
@@ -983,6 +1085,9 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
struct kset *die_kset;
int res, ii;
+ if (!adev->mman.discovery_bin)
+ return -EINVAL;
+
adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
if (!adev->ip_top)
return -ENOMEM;
@@ -1082,7 +1187,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct ip *ip;
+ struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
uint16_t num_dies;
@@ -1098,6 +1203,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
return r;
}
+ adev->gfx.xcc_mask = 0;
+ adev->sdma.sdma_mask = 0;
+ adev->vcn.inst_mask = 0;
+ adev->jpeg.inst_mask = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
@@ -1121,7 +1230,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip))
goto next_ip;
@@ -1131,7 +1240,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
hw_id_names[le16_to_cpu(ip->hw_id)],
le16_to_cpu(ip->hw_id),
- ip->number_instance,
+ ip->instance_number,
ip->major, ip->minor,
ip->revision);
@@ -1145,23 +1254,33 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
ip->revision & 0xc0;
ip->revision &= ~0xc0;
- if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
+ if (adev->vcn.num_vcn_inst <
+ AMDGPU_MAX_VCN_INSTANCES) {
adev->vcn.num_vcn_inst++;
- else
+ adev->vcn.inst_mask |=
+ (1U << ip->instance_number);
+ adev->jpeg.inst_mask |=
+ (1U << ip->instance_number);
+ } else {
dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
adev->vcn.num_vcn_inst + 1,
AMDGPU_MAX_VCN_INSTANCES);
+ }
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
- if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
+ if (adev->sdma.num_instances <
+ AMDGPU_MAX_SDMA_INSTANCES) {
adev->sdma.num_instances++;
- else
+ adev->sdma.sdma_mask |=
+ (1U << ip->instance_number);
+ } else {
dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
adev->sdma.num_instances + 1,
AMDGPU_MAX_SDMA_INSTANCES);
+ }
}
if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
@@ -1169,20 +1288,38 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->umc.node_inst_num++;
}
+ if (le16_to_cpu(ip->hw_id) == GC_HWID)
+ adev->gfx.xcc_mask |=
+ (1U << ip->instance_number);
+
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
* so that we don't need to convert them when accessing adev->reg_offset.
*/
- ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
+ if (ihdr->base_addr_64_bit)
+ /* Truncate the 64-bit base address from ip discovery
+ * and only store the lower 32-bit ip base in reg_offset[].
+ * Bits > 32 follow an ASIC-specific format, thus just
+ * discard them and handle them within the specific ASIC.
+ * This way reg_offset[] and the related helpers can
+ * stay unchanged.
+ * The base address is in dwords, thus clear the
+ * highest 2 bits before storing.
+ */
+ ip->base_address[k] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
+ else
+ ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
}
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
- if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
+ if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
+ hw_id_map[hw_ip] != 0) {
DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
- adev->reg_offset[hw_ip][ip->number_instance] =
+ adev->reg_offset[hw_ip][ip->instance_number] =
ip->base_address;
/* Instance support is somewhat inconsistent.
* SDMA is a good example. Sienna cichlid has 4 total
@@ -1193,69 +1330,22 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
* example. On most chips there are multiple instances
* with the same HWID.
*/
- adev->ip_versions[hw_ip][ip->number_instance] =
+ adev->ip_versions[hw_ip][ip->instance_number] =
IP_VERSION(ip->major, ip->minor, ip->revision);
}
}
next_ip:
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
- amdgpu_discovery_sysfs_init(adev);
-
return 0;
}
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
- int *major, int *minor, int *revision)
-{
- struct binary_header *bhdr;
- struct ip_discovery_header *ihdr;
- struct die_header *dhdr;
- struct ip *ip;
- uint16_t die_offset;
- uint16_t ip_offset;
- uint16_t num_dies;
- uint16_t num_ips;
- int i, j;
-
- if (!adev->mman.discovery_bin) {
- DRM_ERROR("ip discovery uninitialized\n");
- return -EINVAL;
- }
-
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
- num_dies = le16_to_cpu(ihdr->num_dies);
-
- for (i = 0; i < num_dies; i++) {
- die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
- num_ips = le16_to_cpu(dhdr->num_ips);
- ip_offset = die_offset + sizeof(*dhdr);
-
- for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
-
- if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
- if (major)
- *major = ip->major;
- if (minor)
- *minor = ip->minor;
- if (revision)
- *revision = ip->revision;
- return 0;
- }
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
- }
- }
-
- return -EINVAL;
-}
-
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
int vcn_harvest_count = 0;
@@ -1266,7 +1356,8 @@ static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
* so read harvest bit per IP data structure to set
* harvest configuration.
*/
- if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
+ if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&
+ adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {
if ((adev->pdev->device == 0x731E &&
(adev->pdev->revision == 0xC6 ||
adev->pdev->revision == 0xC7)) ||
@@ -1425,6 +1516,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
mall_size += mall_size_per_umc;
}
adev->gmc.mall_size = mall_size;
+ adev->gmc.m_half_use = half_use;
break;
default:
dev_err(adev->dev,
@@ -1611,6 +1703,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 2):
amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
break;
+ case IP_VERSION(6, 1, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
@@ -1659,6 +1754,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
@@ -1706,6 +1802,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
@@ -1804,6 +1901,11 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
break;
+ case IP_VERSION(9, 4, 3):
+ if (!amdgpu_exp_hw_support)
+ return -EINVAL;
+ amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
+ break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 1):
@@ -1871,6 +1973,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -1939,7 +2042,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 0, 2):
- case IP_VERSION(3, 0, 192):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
@@ -1952,7 +2054,11 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 0, 4):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
- return 0;
+ break;
+ case IP_VERSION(4, 0, 3):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2000,6 +2106,17 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ aqua_vanjaram_init_soc_config(adev);
+ break;
+ default:
+ break;
+ }
+}
+
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -2177,6 +2294,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
}
+ amdgpu_discovery_init_soc_config(adev);
+ amdgpu_discovery_sysfs_init(adev);
+
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
@@ -2333,6 +2453,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 1, 0):
adev->hdp.funcs = &hdp_v6_0_funcs;
break;
default:
@@ -2387,8 +2508,15 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
adev->smuio.funcs = &smuio_v13_0_funcs;
break;
+ case IP_VERSION(13, 0, 3):
+ adev->smuio.funcs = &smuio_v13_0_3_funcs;
+ if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU)
+ adev->flags |= AMD_IS_APU;
+ break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(14, 0, 0):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
default:
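
The discovery changes above replace the two hard-coded VCN harvest flags with per-instance masks: enumeration sets one bit per instance in vcn.inst_mask/jpeg.inst_mask (and in xcc_mask/sdma_mask), the harvest table clears the bits of fused-off instances, and amdgpu_discovery_get_harvest_info() reports an instance as harvested when its bit is absent. A small stand-alone sketch of that bookkeeping, using a plain integer in place of the adev fields:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the per-instance mask bookkeeping: discovery sets a bit
 * per enumerated instance, the harvest table clears bits for fused-off
 * instances, and "harvested" simply means "bit not set". */
int main(void)
{
	uint32_t vcn_inst_mask = 0;

	/* Enumeration: say the blob lists VCN instances 0 and 1. */
	vcn_inst_mask |= 1U << 0;
	vcn_inst_mask |= 1U << 1;

	/* Harvest table: instance 1 is fused off on this part. */
	vcn_inst_mask &= ~(1U << 1);

	for (int inst = 0; inst < 2; inst++) {
		int harvested = ((1U << inst) & vcn_inst_mask) == 0;
		printf("VCN%d harvested: %d\n", inst, harvested);
	}
	return 0;
}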
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 8563dd4a7dc2..3a2f347bd50d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,12 +24,10 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
-#define DISCOVERY_TMR_SIZE (4 << 10)
+#define DISCOVERY_TMR_SIZE (8 << 10)
#define DISCOVERY_TMR_OFFSET (64 << 10)
void amdgpu_discovery_fini(struct amdgpu_device *adev);
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
- int *major, int *minor, int *revision);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
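
DISCOVERY_TMR_SIZE grows to 8 KiB here, while the table still sits DISCOVERY_TMR_OFFSET (64 KiB) before the end of VRAM, as read by amdgpu_discovery_read_binary_from_mem() earlier in this series. A stand-alone sketch of that address arithmetic; the 16 GiB VRAM size is an example value only, the driver reads the real size from mmRCC_CONFIG_MEMSIZE:

#include <stdint.h>
#include <stdio.h>

#define DISCOVERY_TMR_SIZE   (8 << 10)   /* bytes read for the IP discovery table */
#define DISCOVERY_TMR_OFFSET (64 << 10)  /* distance of the table from the end of VRAM */

int main(void)
{
	/* Example only: a 16 GiB framebuffer. */
	uint64_t vram_size = 16ULL << 30;
	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

	printf("read %u bytes of discovery data at VRAM offset 0x%llx\n",
	       (unsigned int)DISCOVERY_TMR_SIZE, (unsigned long long)pos);
	return 0;
}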
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d60fe7eb5579..d20dd3f852fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -98,7 +98,7 @@ static void amdgpu_display_flip_callback(struct dma_fence *f,
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
struct dma_fence **f)
{
- struct dma_fence *fence= *f;
+ struct dma_fence *fence = *f;
if (fence == NULL)
return false;
@@ -124,7 +124,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
- unsigned i;
+ unsigned int i;
int vpos, hpos;
for (i = 0; i < work->shared_count; ++i)
@@ -201,7 +201,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
u64 tiling_flags;
int i, r;
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -332,13 +332,15 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
- take the current one */
+ * take the current one
+ */
if (active && !adev->have_disp_power_ref) {
adev->have_disp_power_ref = true;
return ret;
}
/* if we have no active crtcs, then drop the power ref
- we got before */
+ * we got before
+ */
if (!active && adev->have_disp_power_ref) {
pm_runtime_put_autosuspend(dev->dev);
adev->have_disp_power_ref = false;
@@ -507,11 +509,10 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
if (amdgpu_connector->router.ddc_valid)
amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
- if (use_aux) {
+ if (use_aux)
ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
- } else {
+ else
ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
- }
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
@@ -520,10 +521,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
* EDID header starts with:
* 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
* Only the first 6 bytes must be valid as
- * drm_edid_block_valid() can fix the last 2 bytes */
+ * drm_edid_block_valid() can fix the last 2 bytes
+ */
if (drm_edid_header_is_valid(buf) < 6) {
/* Couldn't find an accessible EDID on this
- * connector */
+ * connector
+ */
return false;
}
return true;
@@ -1216,8 +1219,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
- drm_dbg_kms(dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handles[0]);
+ drm_dbg_kms(dev,
+ "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
+ mode_cmd->handles[0]);
+
return ERR_PTR(-ENOENT);
}
@@ -1252,21 +1257,21 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
};
-static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
-{ { UNDERSCAN_OFF, "off" },
+static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
+ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
-static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
-{ { AMDGPU_AUDIO_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
+ { AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
-static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
-{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
+ { AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
@@ -1410,6 +1415,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
if (amdgpu_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
+
a.full = dfixed_const(src_v);
b.full = dfixed_const(dst_v);
amdgpu_crtc->vsc.full = dfixed_div(a, b);
@@ -1429,7 +1435,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \param dev Device to query.
* \param pipe Crtc to query.
- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
* For driver internal use only also supports these flags:
*
* USE_REAL_VBLANKSTART to use the real start of vblank instead
@@ -1496,8 +1502,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
- }
- else {
+ } else {
/* No: Fake something reasonable which gives at least ok results. */
vbl_start = mode->crtc_vdisplay;
vbl_end = 0;
@@ -1505,8 +1510,8 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* Called from driver internal vblank counter query code? */
if (flags & GET_DISTANCE_TO_VBLANKSTART) {
- /* Caller wants distance from real vbl_start in *hpos */
- *hpos = *vpos - vbl_start;
+ /* Caller wants distance from real vbl_start in *hpos */
+ *hpos = *vpos - vbl_start;
}
/* Fudge vblank to start a few scanlines earlier to handle the
@@ -1528,7 +1533,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* In vblank? */
if (in_vbl)
- ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
/* Called from driver internal vblank counter query code? */
if (flags & GET_DISTANCE_TO_VBLANKSTART) {
@@ -1636,6 +1641,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
@@ -1643,9 +1649,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
}
}
- if (fb == NULL || fb->obj[0] == NULL) {
+ if (!fb || !fb->obj[0])
continue;
- }
+
robj = gem_to_amdgpu_bo(fb->obj[0]);
if (!amdgpu_display_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
@@ -1672,6 +1678,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 0c001bb8fc2b..12210598e5b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -149,7 +149,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
if (!bo->tbo.pin_count) {
/* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
- unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
+ unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;
if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
attach->peer2peer) {
@@ -336,7 +336,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_CPU, flags,
- ttm_bo_type_sg, resv, &gobj);
+ ttm_bo_type_sg, resv, &gobj, 0);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index 8fd11497faba..09f6727e7c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -31,10 +31,15 @@ struct amdgpu_doorbell {
/* doorbell mmio */
resource_size_t base;
resource_size_t size;
- u32 __iomem *ptr;
/* Number of doorbells reserved for amdgpu kernel driver */
u32 num_kernel_doorbells;
+
+ /* Kernel doorbells */
+ struct amdgpu_bo *kernel_doorbells;
+
+ /* For CPU access of doorbells */
+ uint32_t *cpu_addr;
};
/* Reserved doorbells for amdgpu (including multimedia).
@@ -59,7 +64,7 @@ struct amdgpu_doorbell_index {
uint32_t gfx_ring1;
uint32_t gfx_userqueue_start;
uint32_t gfx_userqueue_end;
- uint32_t sdma_engine[8];
+ uint32_t sdma_engine[16];
uint32_t mes_ring0;
uint32_t mes_ring1;
uint32_t ih;
@@ -86,10 +91,11 @@ struct amdgpu_doorbell_index {
uint32_t max_assignment;
/* Per engine SDMA doorbell size in dword */
uint32_t sdma_doorbell_range;
+ /* Per xcc doorbell size for KIQ/KCQ */
+ uint32_t xcc_doorbell_range;
};
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
+enum AMDGPU_DOORBELL_ASSIGNMENT {
AMDGPU_DOORBELL_KIQ = 0x000,
AMDGPU_DOORBELL_HIQ = 0x001,
AMDGPU_DOORBELL_DIQ = 0x002,
@@ -107,10 +113,10 @@ typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
AMDGPU_DOORBELL_IH = 0x1E8,
AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
+};
+
+enum AMDGPU_VEGA20_DOORBELL_ASSIGNMENT {
-typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
-{
/* Compute + GFX: 0~255 */
AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
@@ -164,12 +170,20 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
- AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ /* kiq/kcq from second XCD. Max 8 XCDs */
+ AMDGPU_VEGA20_DOORBELL_XCC1_KIQ_START = 0x190,
+ /* 8 compute rings per GC. Max to 0x1CE */
+ AMDGPU_VEGA20_DOORBELL_XCC1_MEC_RING0_START = 0x197,
+
+ /* AID1 SDMA: 0x1D0 ~ 0x1F7 */
+ AMDGPU_VEGA20_DOORBELL_AID1_sDMA_START = 0x1D0,
+
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1F7,
AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+};
+
+enum AMDGPU_NAVI10_DOORBELL_ASSIGNMENT {
-typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT
-{
/* Compute + GFX: 0~255 */
AMDGPU_NAVI10_DOORBELL_KIQ = 0x000,
AMDGPU_NAVI10_DOORBELL_HIQ = 0x001,
@@ -217,13 +231,12 @@ typedef enum _AMDGPU_NAVI10_DOORBELL_ASSIGNMENT
AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = 0x18F,
AMDGPU_NAVI10_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_NAVI10_DOORBELL_ASSIGNMENT;
+};
/*
* 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
*/
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
+enum AMDGPU_DOORBELL64_ASSIGNMENT {
/*
* All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
* a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
@@ -299,13 +312,54 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
+};
+
+enum AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 {
+
+ /* XCC0: 0x00 ~ 0x1F, XCC1: 0x20 ~ 0x3F ... */
+
+ /* KIQ/HIQ/DIQ */
+ AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000,
+ AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x001,
+ AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x002,
+ /* Compute: 0x08 ~ 0x20 */
+ AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x008,
+ AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x00F,
+ AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x010,
+ AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x01F,
+ AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE = 0x020,
+
+ /* SDMA: 0x100 ~ 0x19F */
+ AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100,
+ AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F,
+ /* IH: 0x1A0 ~ 0x1AF */
+ AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0,
+ /* VCN: 0x1B0 ~ 0x1E8 */
+ AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0,
+ AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1E8,
+
+ AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START,
+ AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END,
+
+ AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1E8,
+ AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF
+};
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+/*
+ * GPU doorbell aperture helper functions.
+ */
+int amdgpu_doorbell_init(struct amdgpu_device *adev);
+void amdgpu_doorbell_fini(struct amdgpu_device *adev);
+int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev);
+uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
+ struct amdgpu_bo *db_bo,
+ uint32_t doorbell_index);
+
#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
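
The new AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 enum carves the doorbell space into one 0x20-doorbell window per XCC, with the compute rings starting at 0x008 inside each window. A hedged sketch of how such a per-XCC index could be composed from the constants above; mec_ring_doorbell() is a hypothetical helper for illustration, not a function in this patch:

#include <stdint.h>
#include <stdio.h>

/* Constants mirrored from the layout1 enum above. */
#define LAYOUT1_MEC_RING_START 0x008
#define LAYOUT1_XCC_RANGE      0x020

/* Hypothetical helper: each XCC gets a 0x20-doorbell window, so a
 * compute ring's doorbell is its offset within the window plus the
 * window base for that XCC. */
static uint32_t mec_ring_doorbell(uint32_t xcc_id, uint32_t ring)
{
	return xcc_id * LAYOUT1_XCC_RANGE + LAYOUT1_MEC_RING_START + ring;
}

int main(void)
{
	printf("XCC0 ring0 doorbell: 0x%03x\n", mec_ring_doorbell(0, 0));
	printf("XCC1 ring2 doorbell: 0x%03x\n", mec_ring_doorbell(1, 2));
	return 0;
}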
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
new file mode 100644
index 000000000000..da4be0bbb446
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+
+/**
+ * amdgpu_mm_rdoorbell - read a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ return readl(adev->doorbell.cpu_addr + index);
+
+ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ return 0;
+}
+
+/**
+ * amdgpu_mm_wdoorbell - write a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ writel(v, adev->doorbell.cpu_addr + index);
+ else
+ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+}
+
+/**
+ * amdgpu_mm_rdoorbell64 - read a doorbell Qword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (VEGA10+).
+ */
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
+
+ DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ return 0;
+}
+
+/**
+ * amdgpu_mm_wdoorbell64 - write a doorbell Qword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (VEGA10+).
+ */
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
+ else
+ DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+}
+
+/**
+ * amdgpu_doorbell_index_on_bar - Find doorbell's absolute offset in BAR
+ *
+ * @adev: amdgpu_device pointer
+ * @db_bo: doorbell object's bo
+ * @doorbell_index: doorbell relative index in this doorbell object
+ *
+ * Returns the doorbell's absolute index in the BAR
+ */
+uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
+ struct amdgpu_bo *db_bo,
+ uint32_t doorbell_index)
+{
+ int db_bo_offset;
+
+ db_bo_offset = amdgpu_bo_gpu_offset_no_check(db_bo);
+
+ /* doorbell index is 32 bit but doorbell's size is 64-bit, so *2 */
+ return db_bo_offset / sizeof(u32) + doorbell_index * 2;
+}
+
+/**
+ * amdgpu_doorbell_create_kernel_doorbells - Create kernel doorbells for graphics
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Creates doorbells for graphics driver usage.
+ * Returns 0 on success, error otherwise.
+ */
+int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
+{
+ int r;
+ int size;
+
+ /* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
+ size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
+
+ /* Allocate an extra page for MES kernel usages (ring test) */
+ adev->mes.db_start_dw_offset = size / sizeof(u32);
+ size += PAGE_SIZE;
+
+ r = amdgpu_bo_create_kernel(adev,
+ size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_DOORBELL,
+ &adev->doorbell.kernel_doorbells,
+ NULL,
+ (void **)&adev->doorbell.cpu_addr);
+ if (r) {
+ DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ return r;
+ }
+
+ adev->doorbell.num_kernel_doorbells = size / sizeof(u32);
+ return 0;
+}
+
+/*
+ * GPU doorbell aperture helper functions.
+ */
+/**
+ * amdgpu_doorbell_init - Init doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Init doorbell driver information (CIK)
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_doorbell_init(struct amdgpu_device *adev)
+{
+
+ /* No doorbell on SI hardware generation */
+ if (adev->asic_type < CHIP_BONAIRE) {
+ adev->doorbell.base = 0;
+ adev->doorbell.size = 0;
+ adev->doorbell.num_kernel_doorbells = 0;
+ return 0;
+ }
+
+ if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
+ return -EINVAL;
+
+ amdgpu_asic_init_doorbell_index(adev);
+
+ /* doorbell bar mapping */
+ adev->doorbell.base = pci_resource_start(adev->pdev, 2);
+ adev->doorbell.size = pci_resource_len(adev->pdev, 2);
+
+ adev->doorbell.num_kernel_doorbells =
+ min_t(u32, adev->doorbell.size / sizeof(u32),
+ adev->doorbell_index.max_assignment + 1);
+ if (adev->doorbell.num_kernel_doorbells == 0)
+ return -EINVAL;
+
+ /*
+ * For Vega, reserve and map two pages on the doorbell BAR since the
+ * SDMA paging queue doorbells use the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page, so with the paging queue enabled
+ * num_kernel_doorbells needs one extra page (0x400 dwords, i.e. 4 KiB).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_kernel_doorbells += 0x400;
+
+ return 0;
+}
+
+/**
+ * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down doorbell driver information (CIK)
+ */
+void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->doorbell.kernel_doorbells,
+ NULL,
+ (void **)&adev->doorbell.cpu_addr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index c9a41c997c6c..81edf66dbea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -26,30 +26,31 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_managed.h>
-#include "amdgpu_drv.h"
-
#include <drm/drm_pciids.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
-#include <linux/mmu_notifier.h>
-#include <linux/suspend.h>
+#include <drm/drm_vblank.h>
+
#include <linux/cc_platform.h>
#include <linux/dynamic_debug.h>
+#include <linux/module.h>
+#include <linux/mmu_notifier.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/vga_switcheroo.h>
#include "amdgpu.h"
-#include "amdgpu_irq.h"
+#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
-#include "amdgpu_sched.h"
+#include "amdgpu_drv.h"
#include "amdgpu_fdinfo.h"
-#include "amdgpu_amdkfd.h"
-
+#include "amdgpu_irq.h"
+#include "amdgpu_psp.h"
#include "amdgpu_ras.h"
-#include "amdgpu_xgmi.h"
#include "amdgpu_reset.h"
+#include "amdgpu_sched.h"
+#include "amdgpu_xgmi.h"
+#include "../amdxcp/amdgpu_xcp_drv.h"
/*
* KMS wrapper.
@@ -110,9 +111,11 @@
* 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
* tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
* gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
+ * 3.53.0 - Support for GFX11 CP GFX shadowing
+ * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 52
+#define KMS_DRIVER_MINOR 54
#define KMS_DRIVER_PATCHLEVEL 0
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -150,7 +153,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-
+bool enforce_isolation;
/*
* OverDrive(bit 14) disabled by default
* GFX DCS(bit 19) disabled by default
@@ -177,20 +180,20 @@ uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
uint amdgpu_dc_visual_confirm;
int amdgpu_async_gfx_ring = 1;
-int amdgpu_mcbp;
+int amdgpu_mcbp = -1;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
-uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
int amdgpu_sg_display = -1; /* auto */
+int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -309,9 +312,7 @@ module_param_named(msi, amdgpu_msi, int, 0444);
* jobs is 10000. The timeout for compute is 60000.
*/
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs."
- " 0: keep default value. negative: infinity timeout), "
- "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
@@ -346,8 +347,9 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
* Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
* the dGPUs when they are idle if supported. The default is -1 (auto enable).
* Setting the value to 0 disables this functionality.
+ * Setting the value to -2 enables auto mode with runtime power down allowed even when displays are attached.
*/
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
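+/*
+ * Usage sketch: the new mode can be selected with amdgpu.runpm=-2 on the
+ * kernel command line, or with "options amdgpu runpm=-2" in a modprobe.d
+ * configuration file, to allow power down while displays remain attached.
+ */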
/**
@@ -580,7 +582,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
*/
#ifdef CONFIG_DRM_AMDGPU_SI
-#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support = 0;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
@@ -599,7 +601,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444);
*/
#ifdef CONFIG_DRM_AMDGPU_CIK
-#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
int amdgpu_cik_support = 0;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
@@ -616,8 +618,7 @@ module_param_named(cik_support, amdgpu_cik_support, int, 0444);
* E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled).
*/
MODULE_PARM_DESC(smu_memory_pool_size,
- "reserve gtt for smu debug usage, 0 = disable,"
- "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+ "reserve gtt for smu debug usage, 0 = disable,0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
/**
@@ -630,10 +631,10 @@ module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);
/**
* DOC: mcbp (int)
- * It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled)
+ * It is used to enable mid command buffer preemption. (0 = disabled, 1 = enabled, -1 = auto (default))
*/
MODULE_PARM_DESC(mcbp,
- "Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
+ "Enable Mid-command buffer preemption (0 = disabled, 1 = enabled), -1 = auto (default)");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/**
@@ -755,20 +756,6 @@ MODULE_PARM_DESC(debug_largebar,
"Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");
/**
- * DOC: ignore_crat (int)
- * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
- * table to get information about AMD APUs. This option can serve as a workaround on
- * systems with a broken CRAT table.
- *
- * Default is auto (according to asic type, iommu_v2, and crat table, to decide
- * whether use CRAT)
- */
-int ignore_crat;
-module_param(ignore_crat, int, 0444);
-MODULE_PARM_DESC(ignore_crat,
- "Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
-
-/**
* DOC: halt_if_hws_hang (int)
* Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
* Setting 1 enables halt on hang.
@@ -787,9 +774,9 @@ module_param(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
- * DOC: queue_preemption_timeout_ms (int)
- * queue preemption timeout in ms (1 = Minimum, 9000 = default)
- */
+ * DOC: queue_preemption_timeout_ms (int)
+ * queue preemption timeout in ms (1 = Minimum, 9000 = default)
+ */
int queue_preemption_timeout_ms = 9000;
module_param(queue_preemption_timeout_ms, int, 0644);
MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
@@ -820,6 +807,13 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm
#endif
/**
+ * DOC: mtype_local (int)
+ * Memory type (MTYPE) to use for mappings of local memory
+ * (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC).
+ */
+int amdgpu_mtype_local;
+MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
+module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
+
+/**
* DOC: pcie_p2p (bool)
* Enable PCIe P2P (requires large-BAR). Default value: true (on)
*/
@@ -878,32 +872,6 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
- * DOC: freesync_video (uint)
- * Enable the optimization to adjust front porch timing to achieve seamless
- * mode change experience when setting a freesync supported mode for which full
- * modeset is not needed.
- *
- * The Display Core will add a set of modes derived from the base FreeSync
- * video mode into the corresponding connector's mode list based on commonly
- * used refresh rates and VRR range of the connected display, when users enable
- * this feature. From the userspace perspective, they can see a seamless mode
- * change experience when the change between different refresh rates under the
- * same resolution. Additionally, userspace applications such as Video playback
- * can read this modeset list and change the refresh rate based on the video
- * frame rate. Finally, the userspace can also derive an appropriate mode for a
- * particular refresh rate based on the FreeSync Mode and add it to the
- * connector's mode list.
- *
- * Note: This is an experimental feature.
- *
- * The default value: 0 (off).
- */
-MODULE_PARM_DESC(
- freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
-module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
-
-/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
@@ -948,6 +916,28 @@ MODULE_PARM_DESC(smu_pptable_id,
"specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
+/**
+ * DOC: partition_mode (int)
+ * Used to override the default SPX mode.
+ */
+MODULE_PARM_DESC(
+ user_partt_mode,
+ "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \
+ 0 = AMDGPU_SPX_PARTITION_MODE, \
+ 1 = AMDGPU_DPX_PARTITION_MODE, \
+ 2 = AMDGPU_TPX_PARTITION_MODE, \
+ 3 = AMDGPU_QPX_PARTITION_MODE, \
+ 4 = AMDGPU_CPX_PARTITION_MODE)");
+module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
+
+
+/**
+ * DOC: enforce_isolation (bool)
+ * Enforce process isolation between graphics and compute by using the same reserved VMID.
+ */
+module_param(enforce_isolation, bool, 0444);
+MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute by using the same reserved VMID (false = disable (default), true = enable)");
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -1615,6 +1605,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
0x5874,
0x5940,
0x5941,
+ 0x5b70,
0x5b72,
0x5b73,
0x5b74,
@@ -1660,7 +1651,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
};
static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
+#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -2017,6 +2008,11 @@ static const struct pci_device_id pciidlist[] = {
.class_mask = 0xffffff,
.driver_data = CHIP_IP_DISCOVERY },
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_ACCELERATOR_PROCESSING << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
{0, 0, 0}
};
@@ -2161,6 +2157,10 @@ retry_init:
goto err_pci;
}
+ ret = amdgpu_xcp_dev_register(adev, ent);
+ if (ret)
+ goto err_pci;
+
/*
* 1. don't init fbdev on hw without DCE
* 2. don't init fbdev if there are no connectors
@@ -2233,6 +2233,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_xcp_dev_unplug(adev);
drm_dev_unplug(dev);
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
@@ -2373,7 +2374,6 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
amdgpu_amdkfd_device_init(adev);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
- return;
}
static int amdgpu_pmops_prepare(struct device *dev)
@@ -2497,24 +2497,26 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
struct drm_connector_list_iter iter;
int ret = 0;
- /* XXX: Return busy if any displays are connected to avoid
- * possible display wakeups after runtime resume due to
- * hotplug events in case any displays were connected while
- * the GPU was in suspend. Remove this once that is fixed.
- */
- mutex_lock(&drm_dev->mode_config.mutex);
- drm_connector_list_iter_begin(drm_dev, &iter);
- drm_for_each_connector_iter(list_connector, &iter) {
- if (list_connector->status == connector_status_connected) {
- ret = -EBUSY;
- break;
+ if (amdgpu_runtime_pm != -2) {
+ /* XXX: Return busy if any displays are connected to avoid
+ * possible display wakeups after runtime resume due to
+ * hotplug events in case any displays were connected while
+ * the GPU was in suspend. Remove this once that is fixed.
+ */
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->status == connector_status_connected) {
+ ret = -EBUSY;
+ break;
+ }
}
- }
- drm_connector_list_iter_end(&iter);
- mutex_unlock(&drm_dev->mode_config.mutex);
+ drm_connector_list_iter_end(&iter);
+ mutex_unlock(&drm_dev->mode_config.mutex);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
if (adev->dc_enabled) {
struct drm_crtc *crtc;
@@ -2570,6 +2572,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (ring && ring->sched.ready) {
ret = amdgpu_fence_wait_empty(ring);
if (ret)
@@ -2694,6 +2697,7 @@ long amdgpu_drm_ioctl(struct file *filp,
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
long ret;
+
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
@@ -2758,9 +2762,8 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
if (!filp)
return -EINVAL;
- if (filp->f_op != &amdgpu_driver_kms_fops) {
+ if (filp->f_op != &amdgpu_driver_kms_fops)
return -EINVAL;
- }
file = filp->private_data;
*fpriv = file->driver_priv;
@@ -2806,10 +2809,31 @@ static const struct drm_driver amdgpu_kms_driver = {
.show_fdinfo = amdgpu_show_fdinfo,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_mmap = drm_gem_prime_mmap,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = KMS_DRIVER_MAJOR,
+ .minor = KMS_DRIVER_MINOR,
+ .patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+const struct drm_driver amdgpu_partition_driver = {
+ .driver_features =
+ DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
+ .open = amdgpu_driver_open_kms,
+ .postclose = amdgpu_driver_postclose_kms,
+ .lastclose = amdgpu_driver_lastclose_kms,
+ .ioctls = amdgpu_ioctls_kms,
+ .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
+ .dumb_create = amdgpu_mode_dumb_create,
+ .dumb_map_offset = amdgpu_mode_dumb_mmap,
+ .fops = &amdgpu_driver_kms_fops,
+ .release = &amdgpu_driver_release_kms,
+
+ .gem_prime_import = amdgpu_gem_prime_import,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -2826,18 +2850,13 @@ static struct pci_error_handlers amdgpu_pci_err_handler = {
.resume = amdgpu_pci_resume,
};
-extern const struct attribute_group amdgpu_vram_mgr_attr_group;
-extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
-extern const struct attribute_group amdgpu_vbios_version_attr_group;
-
static const struct attribute_group *amdgpu_sysfs_groups[] = {
&amdgpu_vram_mgr_attr_group,
&amdgpu_gtt_mgr_attr_group,
- &amdgpu_vbios_version_attr_group,
+ &amdgpu_flash_attr_group,
NULL,
};
-
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -2886,9 +2905,11 @@ static void __exit amdgpu_exit(void)
amdgpu_amdkfd_fini();
pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
+ amdgpu_acpi_release();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
mmu_notifier_synchronize();
+ amdgpu_xcp_drv_release();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index 8178323e4bef..5bc2cb661af7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -42,6 +42,8 @@
#define DRIVER_DESC "AMD GPU"
#define DRIVER_DATE "20150101"
+extern const struct drm_driver amdgpu_partition_driver;
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
index 7d2a908438e9..e71768661ca8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
@@ -183,6 +183,8 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
{
const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
u16 limit;
+ u16 ps; /* Partial size */
+ int res = 0, r;
if (!quirks)
limit = 0;
@@ -200,28 +202,25 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
eeprom_addr, buf_size,
read ? "read" : "write", EEPROM_OFFSET_SIZE);
return -EINVAL;
- } else {
- u16 ps; /* Partial size */
- int res = 0, r;
-
- /* The "limit" includes all data bytes sent/received,
- * which would include the EEPROM_OFFSET_SIZE bytes.
- * Account for them here.
- */
- limit -= EEPROM_OFFSET_SIZE;
- for ( ; buf_size > 0;
- buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
- ps = min(limit, buf_size);
-
- r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
- eeprom_buf, ps, read);
- if (r < 0)
- return r;
- res += r;
- }
+ }
- return res;
+ /* The "limit" includes all data bytes sent/received,
+ * which would include the EEPROM_OFFSET_SIZE bytes.
+ * Account for them here.
+ */
+ limit -= EEPROM_OFFSET_SIZE;
+ for ( ; buf_size > 0;
+ buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+ ps = min(limit, buf_size);
+
+ r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
+ eeprom_buf, ps, read);
+ if (r < 0)
+ return r;
+ res += r;
}
+
+ return res;
}
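+/*
+ * Chunking sketch for the loop above, assuming EEPROM_OFFSET_SIZE is 2 and an
+ * adapter quirk limit of 32 bytes: limit becomes 30, so a 100-byte request is
+ * issued as partial transfers of 30 + 30 + 30 + 10 bytes, with eeprom_addr
+ * and eeprom_buf advanced by ps after each transfer.
+ */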
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 27a782a9dc72..3aaeed2d3562 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -70,6 +70,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
amdgpu_encoder->active_device, amdgpu_encoder->devices,
@@ -165,12 +166,12 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
- unsigned hblank = native_mode->htotal - native_mode->hdisplay;
- unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
- unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
- unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
- unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
- unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+ unsigned int hblank = native_mode->htotal - native_mode->hdisplay;
+ unsigned int vblank = native_mode->vtotal - native_mode->vdisplay;
+ unsigned int hover = native_mode->hsync_start - native_mode->hdisplay;
+ unsigned int vover = native_mode->vsync_start - native_mode->vdisplay;
+ unsigned int hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+ unsigned int vsync_width = native_mode->vsync_end - native_mode->vsync_start;
adjusted_mode->clock = native_mode->clock;
adjusted_mode->flags = native_mode->flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 13d7413d4ca3..6038b5021b27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -89,7 +89,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name);
drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
- drm_printf(p, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
+ drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context);
drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
@@ -109,7 +109,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
if (!usage[hw_ip])
continue;
- drm_printf(p, "drm-engine-%s:\t%Ld ns\n", amdgpu_ip_name[hw_ip],
+ drm_printf(p, "drm-engine-%s:\t%lld ns\n", amdgpu_ip_name[hw_ip],
ktime_to_ns(usage[hw_ip]));
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index f52d0ba91a77..7537f5aa76f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -42,7 +42,6 @@
#include "amdgpu_reset.h"
/*
- * Fences
* Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
@@ -140,7 +139,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
- unsigned flags)
+ unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
@@ -174,11 +173,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
adev->fence_context + ring->idx, seq);
/* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
- }
- else
+ } else {
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
+ }
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -377,14 +376,11 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
signed long timeout)
{
- uint32_t seq;
-
- do {
- seq = amdgpu_fence_read(ring);
- udelay(5);
- timeout -= 5;
- } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
+ while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
+ udelay(2);
+ timeout -= 2;
+ }
return timeout > 0 ? timeout : 0;
}
/**
@@ -396,7 +392,7 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
* Returns the number of emitted fences on the ring. Used by the
* dynpm code to ring track activity.
*/
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
+unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
uint64_t emitted;
@@ -475,7 +471,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
*/
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int irq_type)
{
struct amdgpu_device *adev = ring->adev;
uint64_t index;
@@ -556,6 +552,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
}
/**
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if need to restore interrupts, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool is_gfx_power_domain = false;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_SDMA:
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
+ is_gfx_power_domain = true;
+ break;
+ case AMDGPU_RING_TYPE_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ case AMDGPU_RING_TYPE_MES:
+ is_gfx_power_domain = true;
+ break;
+ default:
+ break;
+ }
+
+ return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
+/**
* amdgpu_fence_driver_hw_fini - tear down the fence driver
* for all possible rings.
*
@@ -582,7 +613,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
if (r)
amdgpu_fence_driver_force_completion(ring);
- if (ring->fence_drv.irq_src)
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+ ring->fence_drv.irq_src &&
+ amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
@@ -653,11 +686,13 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
/* enable the interrupt */
- if (ring->fence_drv.irq_src)
+ if (ring->fence_drv.irq_src &&
+ amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
@@ -694,6 +729,30 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_fence_driver_set_error - set error code on fences
+ * @ring: the ring which contains the fences
+ * @error: the error code to set
+ *
+ * Set an error code to all the fences pending on the ring.
+ */
+void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference_protected(drv->fences[i],
+ lockdep_is_held(&drv->lock));
+ if (fence && !dma_fence_is_signaled_locked(fence))
+ dma_fence_set_error(fence, error);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
* @ring: fence of the ring to signal
@@ -701,6 +760,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
*/
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
+ amdgpu_fence_driver_set_error(ring, -ECANCELED);
amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
amdgpu_fence_process(ring);
}
@@ -835,11 +895,12 @@ static const struct dma_fence_ops amdgpu_job_fence_ops = {
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
@@ -913,6 +974,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
reset_work);
struct amdgpu_reset_context reset_context;
+
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 4620c4712ce3..9c66d98af6d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -60,10 +60,10 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
switch (adev->asic_type) {
case CHIP_VEGA20:
/* D161 and D163 are the VG20 server SKUs */
- if (strnstr(atom_ctx->vbios_version, "D161",
- sizeof(atom_ctx->vbios_version)) ||
- strnstr(atom_ctx->vbios_version, "D163",
- sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_pn, "D161",
+ sizeof(atom_ctx->vbios_pn)) ||
+ strnstr(atom_ctx->vbios_pn, "D163",
+ sizeof(atom_ctx->vbios_pn))) {
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
@@ -72,22 +72,23 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
}
case CHIP_ALDEBARAN:
/* All Aldebaran SKUs have an FRU */
- if (!strnstr(atom_ctx->vbios_version, "D673",
- sizeof(atom_ctx->vbios_version)))
+ if (!strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
case CHIP_SIENNA_CICHLID:
- if (strnstr(atom_ctx->vbios_version, "D603",
- sizeof(atom_ctx->vbios_version))) {
- if (strnstr(atom_ctx->vbios_version, "D603GLXE",
- sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_pn, "D603",
+ sizeof(atom_ctx->vbios_pn))) {
+ if (strnstr(atom_ctx->vbios_pn, "D603GLXE",
+ sizeof(atom_ctx->vbios_pn))) {
return false;
- } else {
- if (fru_addr)
- *fru_addr = FRU_EEPROM_MADDR_6;
- return true;
}
+
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
+ return true;
+
} else {
return false;
}
@@ -211,3 +212,92 @@ Out:
kfree(pia);
return 0;
}
+
+/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device.
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards.
+ */
+
+static ssize_t amdgpu_fru_product_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->product_name);
+}
+
+static DEVICE_ATTR(product_name, 0444, amdgpu_fru_product_name_show, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device.
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards.
+ */
+
+static ssize_t amdgpu_fru_product_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->product_number);
+}
+
+static DEVICE_ATTR(product_number, 0444, amdgpu_fru_product_number_show, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device.
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards.
+ */
+
+static ssize_t amdgpu_fru_serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->serial);
+}
+
+static DEVICE_ATTR(serial_number, 0444, amdgpu_fru_serial_number_show, NULL);
+
+static const struct attribute *amdgpu_fru_attributes[] = {
+ &dev_attr_product_name.attr,
+ &dev_attr_product_number.attr,
+ &dev_attr_serial_number.attr,
+ NULL
+};
+
+int amdgpu_fru_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!is_fru_eeprom_supported(adev, NULL))
+ return 0;
+
+ return sysfs_create_files(&adev->dev->kobj, amdgpu_fru_attributes);
+}
+
+void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!is_fru_eeprom_supported(adev, NULL))
+ return;
+
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes);
+}
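+/*
+ * The attributes are created on the PCI device's kobject, so on supported
+ * boards they can be read from user space with, for example,
+ * "cat /sys/bus/pci/devices/0000:03:00.0/product_name" (illustrative BDF).
+ */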
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index 1308d976d60e..c817db17cfa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -25,5 +25,7 @@
#define __AMDGPU_FRU_EEPROM_H__
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
+int amdgpu_fru_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev);
#endif // __AMDGPU_FRU_EEPROM_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2ca3c329de6d..2d4b67175b55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -32,17 +32,15 @@
#include "soc15_common.h"
#define FW_ATTESTATION_DB_COOKIE 0x143b6a37
-#define FW_ATTESTATION_RECORD_VALID 1
+#define FW_ATTESTATION_RECORD_VALID 1
#define FW_ATTESTATION_MAX_SIZE 4096
-typedef struct FW_ATT_DB_HEADER
-{
+struct FW_ATT_DB_HEADER {
uint32_t AttDbVersion; /* version of the fwar feature */
uint32_t AttDbCookie; /* cookie as an extra check for corrupt data */
-} FW_ATT_DB_HEADER;
+};
-typedef struct FW_ATT_RECORD
-{
+struct FW_ATT_RECORD {
uint16_t AttFwIdV1; /* Legacy FW Type field */
uint16_t AttFwIdV2; /* V2 FW ID field */
uint32_t AttFWVersion; /* FW Version */
@@ -50,7 +48,7 @@ typedef struct FW_ATT_RECORD
uint8_t AttSource; /* FW source indicator */
uint8_t RecordValid; /* Indicates whether the record is a valid entry */
uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
-} FW_ATT_RECORD;
+};
static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
char __user *buf,
@@ -60,15 +58,15 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
uint64_t records_addr = 0;
uint64_t vram_pos = 0;
- FW_ATT_DB_HEADER fw_att_hdr = {0};
- FW_ATT_RECORD fw_att_record = {0};
+ struct FW_ATT_DB_HEADER fw_att_hdr = {0};
+ struct FW_ATT_RECORD fw_att_record = {0};
- if (size < sizeof(FW_ATT_RECORD)) {
+ if (size < sizeof(struct FW_ATT_RECORD)) {
DRM_WARN("FW attestation input buffer not enough memory");
return -EINVAL;
}
- if ((*pos + sizeof(FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
+ if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
DRM_WARN("FW attestation out of bounds");
return 0;
}
@@ -83,8 +81,8 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
if (*pos == 0) {
amdgpu_device_vram_access(adev,
vram_pos,
- (uint32_t*)&fw_att_hdr,
- sizeof(FW_ATT_DB_HEADER),
+ (uint32_t *)&fw_att_hdr,
+ sizeof(struct FW_ATT_DB_HEADER),
false);
if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) {
@@ -96,20 +94,20 @@ static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
}
amdgpu_device_vram_access(adev,
- vram_pos + sizeof(FW_ATT_DB_HEADER) + *pos,
- (uint32_t*)&fw_att_record,
- sizeof(FW_ATT_RECORD),
+ vram_pos + sizeof(struct FW_ATT_DB_HEADER) + *pos,
+ (uint32_t *)&fw_att_record,
+ sizeof(struct FW_ATT_RECORD),
false);
if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID)
return 0;
- if (copy_to_user(buf, (void*)&fw_att_record, sizeof(FW_ATT_RECORD)))
+ if (copy_to_user(buf, (void *)&fw_att_record, sizeof(struct FW_ATT_RECORD)))
return -EINVAL;
- *pos += sizeof(FW_ATT_RECORD);
+ *pos += sizeof(struct FW_ATT_RECORD);
- return sizeof(FW_ATT_RECORD);
+ return sizeof(struct FW_ATT_RECORD);
}
static const struct file_operations amdgpu_fw_attestation_debugfs_ops = {
@@ -136,7 +134,7 @@ void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev)
return;
debugfs_create_file("amdgpu_fw_attestation",
- S_IRUSR,
+ 0400,
adev_to_drm(adev)->primary->debugfs_root,
adev,
&amdgpu_fw_attestation_debugfs_ops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 01cb89ffbd56..73b8cca35bab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -35,6 +35,7 @@
#endif
#include "amdgpu.h"
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
/*
* GART
@@ -103,6 +104,142 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
}
/**
+ * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate system memory for GART page table for ASICs that don't have
+ * dedicated VRAM.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+{
+ unsigned int order = get_order(adev->gart.table_size);
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ struct amdgpu_bo *bo = NULL;
+ struct sg_table *sg = NULL;
+ struct amdgpu_bo_param bp;
+ dma_addr_t dma_addr;
+ struct page *p;
+ int ret;
+
+ if (adev->gart.bo != NULL)
+ return 0;
+
+ p = alloc_pages(gfp_flags, order);
+ if (!p)
+ return -ENOMEM;
+
+ /* If the hardware does not support UTCL2 snooping of the CPU caches
+ * then set_memory_wc() could be used as a workaround to mark the pages
+ * as write combine memory.
+ */
+ dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&adev->pdev->dev, dma_addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n");
+ __free_pages(p, order);
+ p = NULL;
+ return -EFAULT;
+ }
+
+ dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
+ /* Create SG table */
+ sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+ if (ret)
+ goto error;
+
+ sg_dma_address(sg->sgl) = dma_addr;
+ sg->sgl->length = adev->gart.table_size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = adev->gart.table_size;
+#endif
+ /* Create SG BO */
+ memset(&bp, 0, sizeof(bp));
+ bp.size = adev->gart.table_size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+ bp.type = ttm_bo_type_sg;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+ bp.flags = 0;
+ ret = amdgpu_bo_create(adev, &bp, &bo);
+ if (ret)
+ goto error;
+
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret);
+ goto error;
+ }
+
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ WARN(ret, "Pinning the GART table failed");
+ if (ret)
+ goto error_resv;
+
+ adev->gart.bo = bo;
+ adev->gart.ptr = page_to_virt(p);
+ /* Make GART table accessible in VMID0 */
+ ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo);
+ if (ret)
+ amdgpu_gart_table_ram_free(adev);
+ amdgpu_bo_unreserve(bo);
+
+ return 0;
+
+error_resv:
+ amdgpu_bo_unreserve(bo);
+error:
+ amdgpu_bo_unref(&bo);
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
+ __free_pages(p, order);
+ return ret;
+}
+
+/**
+ * amdgpu_gart_table_ram_free - free gart page table system ram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the system memory used for the GART page table on ASICs that don't
+ * have dedicated VRAM.
+ */
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+{
+ unsigned int order = get_order(adev->gart.table_size);
+ struct sg_table *sg = adev->gart.bo->tbo.sg;
+ struct page *p;
+ int ret;
+
+ ret = amdgpu_bo_reserve(adev->gart.bo, false);
+ if (!ret) {
+ amdgpu_bo_unpin(adev->gart.bo);
+ amdgpu_bo_unreserve(adev->gart.bo);
+ }
+ amdgpu_bo_unref(&adev->gart.bo);
+ sg_free_table(sg);
+ kfree(sg);
+ p = virt_to_page(adev->gart.ptr);
+ __free_pages(p, order);
+
+ adev->gart.ptr = NULL;
+}
+
+/**
* amdgpu_gart_table_vram_alloc - allocate vram for gart page table
*
* @adev: amdgpu_device pointer
@@ -182,7 +319,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
mb();
amdgpu_device_flush_hdp(adev, NULL);
- for (i = 0; i < adev->num_vmhubs; i++)
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
drm_dev_exit(idx);
@@ -264,7 +401,7 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
mb();
amdgpu_device_flush_hdp(adev, NULL);
- for (i = 0; i < adev->num_vmhubs; i++)
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 8fea3e04e411..8283d682f543 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -51,6 +51,8 @@ struct amdgpu_gart {
uint64_t gart_pte_flags;
};
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 863cb668e000..ca4d2d430e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
@@ -98,7 +99,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, enum ttm_bo_type type,
struct dma_resv *resv,
- struct drm_gem_object **obj)
+ struct drm_gem_object **obj, int8_t xcp_id_plus1)
{
struct amdgpu_bo *bo;
struct amdgpu_bo_user *ubo;
@@ -116,6 +117,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bp.flags = flags;
bp.domain = initial_domain;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+ bp.xcp_id_plus1 = xcp_id_plus1;
r = amdgpu_bo_create_user(adev, &bp, &ubo);
if (r)
@@ -180,11 +182,10 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
return r;
bo_va = amdgpu_vm_bo_find(vm, abo);
- if (!bo_va) {
+ if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
- } else {
+ else
++bo_va->ref_count;
- }
amdgpu_bo_unreserve(abo);
return 0;
}
@@ -197,29 +198,24 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head list, duplicates;
struct dma_fence *fence = NULL;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
+ struct drm_exec exec;
long r;
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
-
- tv.bo = &bo->tbo;
- tv.num_shared = 2;
- list_add(&tv.head, &list);
-
- amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
- if (r) {
- dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%ld)\n", r);
- return;
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
}
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
@@ -229,6 +225,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ if (unlikely(r < 0))
+ dev_err(adev->dev, "failed to clear page "
+ "tables on GEM object close (%ld)\n", r);
if (r || !fence)
goto out_unlock;
@@ -236,10 +235,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
dma_fence_put(fence);
out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page "
- "tables on GEM object close (%ld)\n", r);
- ttm_eu_backoff_reservation(&ticket, &list);
+ if (r)
+ dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+ drm_exec_fini(&exec);
}
static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
@@ -291,6 +289,10 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
+ /* reject DOORBELLs until userspace code to use it is available */
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
+ return -EINVAL;
+
/* reject invalid gem flags */
if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
@@ -336,7 +338,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
- flags, ttm_bo_type_device, resv, &gobj);
+ flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
if (r && r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -379,6 +381,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
struct hmm_range *range;
struct amdgpu_bo *bo;
@@ -405,7 +408,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
- 0, ttm_bo_type_device, NULL, &gobj);
+ 0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
if (r)
return r;
@@ -461,9 +464,9 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct amdgpu_bo *robj;
gobj = drm_gem_object_lookup(filp, handle);
- if (gobj == NULL) {
+ if (!gobj)
return -ENOENT;
- }
+
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
@@ -480,6 +483,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_gem_mmap *args = data;
uint32_t handle = args->in.handle;
+
memset(args, 0, sizeof(*args));
return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
@@ -506,7 +510,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
/* clamp timeout to avoid unsigned-> signed overflow */
- if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
+ if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
return MAX_SCHEDULE_TIMEOUT - 1;
return timeout_jiffies;
@@ -524,9 +528,9 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
long ret;
gobj = drm_gem_object_lookup(filp, handle);
- if (gobj == NULL) {
+ if (!gobj)
return -ENOENT;
- }
+
robj = gem_to_amdgpu_bo(gobj);
ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
true, timeout);
@@ -553,7 +557,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo *robj;
int r = -1;
- DRM_DEBUG("%d \n", args->handle);
+ DRM_DEBUG("%d\n", args->handle);
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
@@ -673,17 +677,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo_list_entry vm_pd;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head list, duplicates;
+ struct drm_exec exec;
uint64_t va_flags;
uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_dbg(dev->dev,
- "va_address 0x%LX is in reserved area 0x%LX\n",
+ "va_address 0x%llx is in reserved area 0x%llx\n",
args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
@@ -691,7 +692,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address >= AMDGPU_GMC_HOLE_START &&
args->va_address < AMDGPU_GMC_HOLE_END) {
dev_dbg(dev->dev,
- "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
+ "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
args->va_address, AMDGPU_GMC_HOLE_START,
AMDGPU_GMC_HOLE_END);
return -EINVAL;
@@ -726,36 +727,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
- tv.bo = &abo->tbo;
- if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
- tv.num_shared = 1;
- else
- tv.num_shared = 0;
- list_add(&tv.head, &list);
} else {
gobj = NULL;
abo = NULL;
}
- amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(&exec) {
+ if (gobj) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
- if (r)
- goto error_unref;
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
if (abo) {
bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
if (!bo_va) {
r = -ENOENT;
- goto error_backoff;
+ goto error;
}
} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
bo_va = fpriv->prt_va;
@@ -792,10 +795,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
args->operation);
-error_backoff:
- ttm_eu_backoff_reservation(&ticket, &list);
-
-error_unref:
+error:
+ drm_exec_fini(&exec);
drm_gem_object_put(gobj);
return r;
}
@@ -811,9 +812,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
int r;
gobj = drm_gem_object_lookup(filp, args->handle);
- if (gobj == NULL) {
+ if (!gobj)
return -ENOENT;
- }
+
robj = gem_to_amdgpu_bo(gobj);
r = amdgpu_bo_reserve(robj, false);
@@ -908,6 +909,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct drm_gem_object *gobj;
uint32_t handle;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
@@ -931,16 +933,16 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
domain = amdgpu_bo_get_preferred_domain(adev,
amdgpu_display_supported_domains(adev, flags));
r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
- ttm_bo_type_device, NULL, &gobj);
+ ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
if (r)
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(gobj);
- if (r) {
+ if (r)
return r;
- }
+
args->handle = handle;
return 0;
}
@@ -948,7 +950,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 637bf51dbf06..f30264782ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -43,8 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, enum ttm_bo_type type,
struct dma_resv *resv,
- struct drm_gem_object **obj);
-
+ struct drm_gem_object **obj, int8_t xcp_id_plus1);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f3f541ba0aca..2382921710ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -28,6 +28,7 @@
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xcp.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -63,10 +64,10 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
}
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
- int mec, int pipe, int queue)
+ int xcc_id, int mec, int pipe, int queue)
{
return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
- adev->gfx.mec.queue_bitmap);
+ adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
@@ -109,9 +110,9 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
* The bitmask of CUs to be disabled in the shader array determined by se and
* sh is stored in mask[se * max_sh + sh].
*/
-void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
- unsigned se, sh, cu;
+ unsigned int se, sh, cu;
const char *p;
memset(mask, 0, sizeof(*mask) * max_se * max_sh);
@@ -123,6 +124,7 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
for (;;) {
char *next;
int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+
if (ret < 3) {
DRM_ERROR("amdgpu: could not parse disable_cu\n");
return;
@@ -204,29 +206,38 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe;
+ int i, j, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe,
adev->gfx.num_compute_rings);
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
if (multipipe_policy) {
- /* policy: make queues evenly cross all pipes on MEC1 only */
- for (i = 0; i < max_queues_per_mec; i++) {
- pipe = i % adev->gfx.mec.num_pipe_per_mec;
- queue = (i / adev->gfx.mec.num_pipe_per_mec) %
- adev->gfx.mec.num_queue_per_pipe;
-
- set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
- adev->gfx.mec.queue_bitmap);
+ /* policy: make queues evenly cross all pipes on MEC1 only
+ * for multiple xcc, just use the original policy for simplicity */
+ for (j = 0; j < num_xcc; j++) {
+ for (i = 0; i < max_queues_per_mec; i++) {
+ pipe = i % adev->gfx.mec.num_pipe_per_mec;
+ queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+ adev->gfx.mec.num_queue_per_pipe;
+
+ set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+ adev->gfx.mec_bitmap[j].queue_bitmap);
+ }
}
} else {
/* policy: amdgpu owns all queues in the given pipe */
- for (i = 0; i < max_queues_per_mec; ++i)
- set_bit(i, adev->gfx.mec.queue_bitmap);
+ for (j = 0; j < num_xcc; j++) {
+ for (i = 0; i < max_queues_per_mec; ++i)
+ set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
+ }
}
- dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
+ for (j = 0; j < num_xcc; j++) {
+ dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
+ bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
+ }
}
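+/*
+ * Distribution sketch for the multipipe policy above, assuming 4 pipes per
+ * MEC and 8 queues per pipe (illustrative values): bits are set in the order
+ * pipe0.q0, pipe1.q0, pipe2.q0, pipe3.q0, pipe0.q1, pipe1.q1, ... so
+ * consecutive compute queues land on different pipes of MEC1 on every XCC.
+ */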
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
@@ -258,7 +269,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+ struct amdgpu_ring *ring, int xcc_id)
{
int queue_bit;
int mec, pipe, queue;
@@ -268,7 +279,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
* adev->gfx.mec.num_queue_per_pipe;
while (--queue_bit >= 0) {
- if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+ if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
continue;
amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
@@ -294,9 +305,9 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq)
+ struct amdgpu_irq_src *irq, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
int r = 0;
spin_lock_init(&kiq->ring_lock);
@@ -304,16 +315,20 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = adev->doorbell_index.kiq;
- ring->vm_hub = AMDGPU_GFXHUB_0;
-
- r = amdgpu_gfx_kiq_acquire(adev, ring);
+ ring->xcc_id = xcc_id;
+ ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
+ ring->doorbell_index =
+ (adev->doorbell_index.kiq +
+ xcc_id * adev->doorbell_index.xcc_doorbell_range)
+ << 1;
+
+ r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
if (r)
return r;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring->no_scheduler = true;
- sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+ sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
@@ -327,19 +342,19 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
amdgpu_ring_fini(ring);
}
-void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
- unsigned hpd_size)
+ unsigned int hpd_size, int xcc_id)
{
int r;
u32 *hpd;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
@@ -362,13 +377,18 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
- unsigned mqd_size)
+ unsigned int mqd_size, int xcc_id)
{
- struct amdgpu_ring *ring = NULL;
- int r, i;
+ int r, i, j;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *ring = &kiq->ring;
+ u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+
+ /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+ domain |= AMDGPU_GEM_DOMAIN_VRAM;
/* create MQD for KIQ */
- ring = &adev->gfx.kiq.ring;
if (!adev->enable_mes_kiq && !ring->mqd_obj) {
/* originally the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must
* otherwise the hypervisor triggers a SAVE_VF failure after the driver is unloaded, which means the MQD
@@ -387,9 +407,12 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
}
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
- dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+ kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
+ if (!kiq->mqd_backup) {
+ dev_warn(adev->dev,
+ "no memory to create MQD backup for ring %s\n", ring->name);
+ return -ENOMEM;
+ }
}
if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
@@ -398,47 +421,55 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring = &adev->gfx.gfx_ring[i];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ domain, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
+ ring->mqd_size = mqd_size;
/* prepare MQD backup */
adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->gfx.me.mqd_backup[i])
+ if (!adev->gfx.me.mqd_backup[i]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+ return -ENOMEM;
+ }
}
}
}
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ ring = &adev->gfx.compute_ring[j];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ domain, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
+ ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->gfx.mec.mqd_backup[i])
+ adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+ if (!adev->gfx.mec.mqd_backup[j]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+ return -ENOMEM;
+ }
}
}
return 0;
}
-void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring = NULL;
- int i;
+ int i, j;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
@@ -451,43 +482,81 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- kfree(adev->gfx.mec.mqd_backup[i]);
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ ring = &adev->gfx.compute_ring[j];
+ kfree(adev->gfx.mec.mqd_backup[j]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
- ring = &adev->gfx.kiq.ring;
- kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
+ ring = &kiq->ring;
+ kfree(kiq->mqd_backup);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
-int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
+int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;
+ int j;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_compute_rings)) {
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&kiq->ring_lock);
return -ENOMEM;
}
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+ &adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
+ }
+
+ if (kiq_ring->sched.ready && !adev->job_hang)
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+
+ return r;
+}
+
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int i, r = 0;
+ int j;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock(&kiq->ring_lock);
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ adev->gfx.num_gfx_rings)) {
+ spin_unlock(&kiq->ring_lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+ &adev->gfx.gfx_ring[j],
+ PREEMPT_QUEUES, 0, 0);
+ }
+ }
- if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
+ if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&kiq->ring_lock);
return r;
}
@@ -505,18 +574,18 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
return set_resource_bit;
}
-int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
+int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
uint64_t queue_mask = 0;
- int r, i;
+ int r, i, j;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
return -EINVAL;
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+ if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
continue;
/* This situation may be hit in the future if a new HW
@@ -532,13 +601,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ amdgpu_device_flush_hdp(adev, NULL);
+
+ spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&kiq->ring_lock);
return r;
}
@@ -546,11 +617,51 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
queue_mask = ~0ULL;
kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.compute_ring[j]);
+ }
r = amdgpu_ring_test_helper(kiq_ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&kiq->ring_lock);
+ if (r)
+ DRM_ERROR("KCQ enable failed\n");
+
+ return r;
+}
+
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int r, i, j;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
+ return -EINVAL;
+
+ amdgpu_device_flush_hdp(adev, NULL);
+
+ spin_lock(&kiq->ring_lock);
+ /* No need to map kgq on the slave */
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+ adev->gfx.num_gfx_rings);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.gfx_ring[j]);
+ }
+ }
+
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
if (r)
DRM_ERROR("KCQ enable failed\n");
@@ -589,15 +700,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
- /* If going to s2idle, no need to wait */
- if (adev->in_s0ix) {
- if (!amdgpu_dpm_set_powergating_by_smu(adev,
- AMD_IP_BLOCK_TYPE_GFX, true))
- adev->gfx.gfx_off_state = true;
- } else {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
- }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
@@ -785,12 +889,31 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
return 0;
}
+void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
+ void *ras_error_status,
+ void (*func)(struct amdgpu_device *adev, void *ras_error_status,
+ int xcc_id))
+{
+ int i;
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+ uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ if (err_data) {
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+ }
+
+ for_each_inst(i, xcc_mask)
+ func(adev, ras_error_status, i);
+}
+
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq, reg_val_offs = 0, value = 0;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
if (amdgpu_device_skip_hw_access(adev))
@@ -858,7 +981,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
@@ -1062,3 +1185,125 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
}
}
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
+{
+ return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
+ adev->gfx.num_xcc_per_xcp : 1));
+}
+
+static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int mode;
+
+ mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ AMDGPU_XCP_FL_NONE);
+
+ return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
+}
+
+static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
+ struct device_attribute *addr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_gfx_partition mode;
+ int ret = 0, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ if (num_xcc % 2 != 0)
+ return -EINVAL;
+
+ if (!strncasecmp("SPX", buf, strlen("SPX"))) {
+ mode = AMDGPU_SPX_PARTITION_MODE;
+ } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
+ /*
+ * DPX mode needs AIDs to be in multiple of 2.
+ * Each AID connects 2 XCCs.
+ */
+ if (num_xcc % 4)
+ return -EINVAL;
+ mode = AMDGPU_DPX_PARTITION_MODE;
+ } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
+ if (num_xcc != 6)
+ return -EINVAL;
+ mode = AMDGPU_TPX_PARTITION_MODE;
+ } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
+ if (num_xcc != 8)
+ return -EINVAL;
+ mode = AMDGPU_QPX_PARTITION_MODE;
+ } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
+ mode = AMDGPU_CPX_PARTITION_MODE;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ char *supported_partition;
+
+ /* TBD */
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ supported_partition = "SPX, DPX, QPX, CPX";
+ break;
+ case 6:
+ supported_partition = "SPX, TPX, CPX";
+ break;
+ case 4:
+ supported_partition = "SPX, DPX, CPX";
+ break;
+ /* this configuration seems to exist only in the emulation phase */
+ case 2:
+ supported_partition = "SPX, CPX";
+ break;
+ default:
+ supported_partition = "Not supported";
+ break;
+ }
+
+ return sysfs_emit(buf, "%s\n", supported_partition);
+}
+
+static DEVICE_ATTR(current_compute_partition, 0644,
+ amdgpu_gfx_get_current_compute_partition,
+ amdgpu_gfx_set_compute_partition);
+
+static DEVICE_ATTR(available_compute_partition, 0444,
+ amdgpu_gfx_get_available_compute_partition, NULL);
+
+int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
+ if (r)
+ return r;
+
+ r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
+
+ return r;
+}
+
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_current_compute_partition);
+ device_remove_file(adev->dev, &dev_attr_available_compute_partition);
+}
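A minimal sketch, assuming the per-XCC helpers above are driven from a GFX IP block, of how the new xcc_id-based KIQ/MQD setup and the KCQ/KGQ enable paths fit together. The names my_gfx_sw_init_xcc() and my_gfx_hw_init_queues() are hypothetical; real callers also wire up IRQ sources, doorbells and teardown on error.

static int my_gfx_sw_init_xcc(struct amdgpu_device *adev,
			      unsigned int mqd_size, unsigned int hpd_size)
{
	int i, r, num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	for (i = 0; i < num_xcc; i++) {
		r = amdgpu_gfx_kiq_init(adev, hpd_size, i);
		if (r)
			return r;
		r = amdgpu_gfx_kiq_init_ring(adev, &adev->gfx.kiq[i].ring,
					     &adev->gfx.kiq[i].irq, i);
		if (r)
			return r;
		r = amdgpu_gfx_mqd_sw_init(adev, mqd_size, i);
		if (r)
			return r;
	}
	return 0;
}

static int my_gfx_hw_init_queues(struct amdgpu_device *adev)
{
	int i, r, num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	for (i = 0; i < num_xcc; i++) {
		/* map compute queues through the per-XCC KIQ */
		r = amdgpu_gfx_enable_kcq(adev, i);
		if (r)
			return r;
		/* graphics queues are only mapped on a master XCC;
		 * amdgpu_gfx_enable_kgq() checks that internally */
		r = amdgpu_gfx_enable_kgq(adev, i);
		if (r)
			return r;
	}
	return 0;
}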
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index bfabea76d166..a4ff515ce896 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -61,7 +61,42 @@ enum amdgpu_gfx_partition {
AMDGPU_TPX_PARTITION_MODE = 2,
AMDGPU_QPX_PARTITION_MODE = 3,
AMDGPU_CPX_PARTITION_MODE = 4,
- AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE,
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1,
+ /* Automatically choose the right mode */
+ AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2,
+};
+
+#define NUM_XCC(x) hweight16(x)
+
+enum amdgpu_pkg_type {
+ AMDGPU_PKG_TYPE_APU = 2,
+ AMDGPU_PKG_TYPE_UNKNOWN,
+};
+
+enum amdgpu_gfx_ras_mem_id_type {
+ AMDGPU_GFX_CP_MEM = 0,
+ AMDGPU_GFX_GCEA_MEM,
+ AMDGPU_GFX_GC_CANE_MEM,
+ AMDGPU_GFX_GCUTCL2_MEM,
+ AMDGPU_GFX_GDS_MEM,
+ AMDGPU_GFX_LDS_MEM,
+ AMDGPU_GFX_RLC_MEM,
+ AMDGPU_GFX_SP_MEM,
+ AMDGPU_GFX_SPI_MEM,
+ AMDGPU_GFX_SQC_MEM,
+ AMDGPU_GFX_SQ_MEM,
+ AMDGPU_GFX_TA_MEM,
+ AMDGPU_GFX_TCC_MEM,
+ AMDGPU_GFX_TCA_MEM,
+ AMDGPU_GFX_TCI_MEM,
+ AMDGPU_GFX_TCP_MEM,
+ AMDGPU_GFX_TD_MEM,
+ AMDGPU_GFX_TCX_MEM,
+ AMDGPU_GFX_ATC_L2_MEM,
+ AMDGPU_GFX_UTCL2_MEM,
+ AMDGPU_GFX_VML2_MEM,
+ AMDGPU_GFX_VML2_WALKER_MEM,
+ AMDGPU_GFX_MEM_TYPE_NUM
};
struct amdgpu_mec {
@@ -75,8 +110,10 @@ struct amdgpu_mec {
u32 num_mec;
u32 num_pipe_per_mec;
u32 num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
+};
+struct amdgpu_mec_bitmap {
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
@@ -120,6 +157,7 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
+ void *mqd_backup;
};
/*
@@ -230,23 +268,37 @@ struct amdgpu_gfx_ras {
struct amdgpu_iv_entry *entry);
};
+struct amdgpu_gfx_shadow_info {
+ u32 shadow_size;
+ u32 shadow_alignment;
+ u32 csa_size;
+ u32 csa_alignment;
+};
+
struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance);
- void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd,
+ u32 sh_num, u32 instance, int xcc_id);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t *dst, int *no_fields);
- void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd,
+ void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread, uint32_t start,
uint32_t size, uint32_t *dst);
- void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd,
+ void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start, uint32_t size,
uint32_t *dst);
void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
- u32 queue, u32 vmid);
+ u32 queue, u32 vmid, u32 xcc_id);
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
+ int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info);
+ enum amdgpu_gfx_partition
+ (*query_partition_mode)(struct amdgpu_device *adev);
+ int (*switch_partition_mode)(struct amdgpu_device *adev,
+ int num_xccs_per_xcp);
+ int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
};
struct sq_work {
@@ -296,7 +348,8 @@ struct amdgpu_gfx {
struct amdgpu_ce ce;
struct amdgpu_me me;
struct amdgpu_mec mec;
- struct amdgpu_kiq kiq;
+ struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES];
+ struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];
struct amdgpu_imu imu;
bool rs64_enable; /* firmware format */
const struct firmware *me_fw; /* ME firmware */
@@ -376,15 +429,32 @@ struct amdgpu_gfx {
struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
struct amdgpu_ring_mux muxer;
- enum amdgpu_gfx_partition partition_mode;
- uint32_t num_xcd;
+ bool cp_gfx_shadow; /* for gfx11 */
+
+ uint16_t xcc_mask;
uint32_t num_xcc_per_xcp;
+ struct mutex partition_mutex;
+ bool mcbp; /* mid command buffer preemption */
};
+struct amdgpu_gfx_ras_reg_entry {
+ struct amdgpu_ras_err_status_reg_entry reg_entry;
+ enum amdgpu_gfx_ras_mem_id_type mem_id_type;
+ uint32_t se_num;
+};
+
+struct amdgpu_gfx_ras_mem_id_entry {
+ const struct amdgpu_ras_memory_id_entry *mem_id_ent;
+ uint32_t size;
+};
+
+#define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)},
+
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
-#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
+#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -404,19 +474,21 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq);
+ struct amdgpu_irq_src *irq, int xcc_id);
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
-void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
- unsigned hpd_size);
+ unsigned hpd_size, int xcc_id);
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
- unsigned mqd_size);
-void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev);
-int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev);
-int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev);
+ unsigned mqd_size, int xcc_id);
+void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id);
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
@@ -425,8 +497,8 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
int pipe, int queue);
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
-bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
- int pipe, int queue);
+bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id,
+ int mec, int pipe, int queue);
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
@@ -458,4 +530,33 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id)
int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
+ void *ras_error_status,
+ void (*func)(struct amdgpu_device *adev, void *ras_error_status,
+ int xcc_id));
+
+static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
+{
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ return "SPX";
+ case AMDGPU_DPX_PARTITION_MODE:
+ return "DPX";
+ case AMDGPU_TPX_PARTITION_MODE:
+ return "TPX";
+ case AMDGPU_QPX_PARTITION_MODE:
+ return "QPX";
+ case AMDGPU_CPX_PARTITION_MODE:
+ return "CPX";
+ default:
+ return "UNKNOWN";
+ }
+
+ return "UNKNOWN";
+}
+
#endif
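A hedged sketch of how an IP block could reuse the amdgpu_gfx_ras_error_func() helper declared above to fan a per-XCC error-count routine out across all XCC instances; my_count_errors_per_xcc() and my_query_ras_error_count() are hypothetical names, not part of this header.

static void my_count_errors_per_xcc(struct amdgpu_device *adev,
				     void *ras_error_status, int xcc_id)
{
	/* read the per-instance RAS status registers for xcc_id and
	 * accumulate counts into the struct ras_err_data passed in */
}

static void my_query_ras_error_count(struct amdgpu_device *adev,
				     void *ras_error_status)
{
	/* resets ue_count/ce_count, then invokes the callback once per XCC */
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  my_count_errors_per_xcc);
}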
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 4e2531758866..d78bd9732543 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -534,22 +534,21 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
* subject to change when ring number changes
* Engine 17: Gart flushes
*/
-#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
-#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define AMDGPU_VMHUB_INV_ENG_BITMAP 0x1FFF3
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
- {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
- GFXHUB_FREE_VM_INV_ENGS_BITMAP};
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
unsigned i;
unsigned vmhub, inv_eng;
- if (adev->enable_mes) {
+ /* init the vm inv eng for all vmhubs */
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
+ vm_inv_engs[i] = AMDGPU_VMHUB_INV_ENG_BITMAP;
/* reserve engine 5 for firmware */
- for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++)
- vm_inv_engs[vmhub] &= ~(1 << 5);
+ if (adev->enable_mes)
+ vm_inv_engs[i] &= ~(1 << 5);
}
for (i = 0; i < adev->num_rings; ++i) {
@@ -593,6 +592,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(9, 3, 0):
/* GC 10.3.7 */
case IP_VERSION(10, 3, 7):
+ /* GC 11.0.1 */
+ case IP_VERSION(11, 0, 1):
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -616,7 +617,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
case IP_VERSION(10, 3, 3):
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
/* Don't enable it by default yet.
*/
@@ -670,7 +670,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = (hub_type == AMDGPU_GFXHUB_0) ?
+ tmp = (hub_type == AMDGPU_GFXHUB(0)) ?
RREG32_SOC15_IP(GC, reg) :
RREG32_SOC15_IP(MMHUB, reg);
@@ -679,7 +679,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
else
tmp &= ~hub->vm_cntx_cntl_vm_fault;
- (hub_type == AMDGPU_GFXHUB_0) ?
+ (hub_type == AMDGPU_GFXHUB(0)) ?
WREG32_SOC15_IP(GC, reg, tmp) :
WREG32_SOC15_IP(MMHUB, reg, tmp);
}
@@ -892,3 +892,47 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
return 0;
}
+
+static ssize_t current_memory_partition_show(
+ struct device *dev, struct device_attribute *addr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_memory_partition mode;
+
+ mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ switch (mode) {
+ case AMDGPU_NPS1_PARTITION_MODE:
+ return sysfs_emit(buf, "NPS1\n");
+ case AMDGPU_NPS2_PARTITION_MODE:
+ return sysfs_emit(buf, "NPS2\n");
+ case AMDGPU_NPS3_PARTITION_MODE:
+ return sysfs_emit(buf, "NPS3\n");
+ case AMDGPU_NPS4_PARTITION_MODE:
+ return sysfs_emit(buf, "NPS4\n");
+ case AMDGPU_NPS6_PARTITION_MODE:
+ return sysfs_emit(buf, "NPS6\n");
+ case AMDGPU_NPS8_PARTITION_MODE:
+ return sysfs_emit(buf, "NPS8\n");
+ default:
+ return sysfs_emit(buf, "UNKNOWN\n");
+ }
+
+ return sysfs_emit(buf, "UNKNOWN\n");
+}
+
+static DEVICE_ATTR_RO(current_memory_partition);
+
+int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
+ return 0;
+
+ return device_create_file(adev->dev,
+ &dev_attr_current_memory_partition);
+}
+
+void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_current_memory_partition);
+}
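A sketch, under stated assumptions, of the gmc_funcs->query_mem_partition_mode() callback that the new current_memory_partition attribute relies on. Deriving the mode from gmc.num_mem_partitions is an illustrative assumption; real implementations typically read the mode from hardware or firmware state.

static enum amdgpu_memory_partition
my_query_mem_partition_mode(struct amdgpu_device *adev)
{
	/* map the number of configured memory partitions to an NPS mode */
	switch (adev->gmc.num_mem_partitions) {
	case 1: return AMDGPU_NPS1_PARTITION_MODE;
	case 2: return AMDGPU_NPS2_PARTITION_MODE;
	case 3: return AMDGPU_NPS3_PARTITION_MODE;
	case 4: return AMDGPU_NPS4_PARTITION_MODE;
	case 6: return AMDGPU_NPS6_PARTITION_MODE;
	case 8: return AMDGPU_NPS8_PARTITION_MODE;
	default: return UNKNOWN_MEMORY_PARTITION_MODE;
	}
}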
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6d105d7fb98b..fdc25cd559b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -63,6 +63,16 @@
struct firmware;
+enum amdgpu_memory_partition {
+ UNKNOWN_MEMORY_PARTITION_MODE = 0,
+ AMDGPU_NPS1_PARTITION_MODE = 1,
+ AMDGPU_NPS2_PARTITION_MODE = 2,
+ AMDGPU_NPS3_PARTITION_MODE = 3,
+ AMDGPU_NPS4_PARTITION_MODE = 4,
+ AMDGPU_NPS6_PARTITION_MODE = 6,
+ AMDGPU_NPS8_PARTITION_MODE = 8,
+};
+
/*
* GMC page fault information
*/
@@ -119,7 +129,8 @@ struct amdgpu_gmc_funcs {
uint32_t vmhub, uint32_t flush_type);
/* flush the vm tlb via pasid */
int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
- uint32_t flush_type, bool all_hub);
+ uint32_t flush_type, bool all_hub,
+ uint32_t inst);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -137,8 +148,15 @@ struct amdgpu_gmc_funcs {
void (*get_vm_pte)(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags);
+ /* override per-page pte flags */
+ void (*override_vm_pte_flags)(struct amdgpu_device *dev,
+ struct amdgpu_vm *vm,
+ uint64_t addr, uint64_t *flags);
/* get the amount of memory used by the vbios for pre-OS console */
unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
+
+ enum amdgpu_memory_partition (*query_mem_partition_mode)(
+ struct amdgpu_device *adev);
};
struct amdgpu_xgmi_ras {
@@ -164,6 +182,21 @@ struct amdgpu_xgmi {
struct amdgpu_xgmi_ras *ras;
};
+struct amdgpu_mem_partition_info {
+ union {
+ struct {
+ uint32_t fpfn;
+ uint32_t lpfn;
+ } range;
+ struct {
+ int node;
+ } numa;
+ };
+ uint64_t size;
+};
+
+#define INVALID_PFN -1
+
struct amdgpu_gmc {
/* FB's physical address in MMIO space (for CPU to
* map FB). This is different compared to the agp/
@@ -250,7 +283,10 @@ struct amdgpu_gmc {
uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
bool tmz_enabled;
+ bool is_app_apu;
+ struct amdgpu_mem_partition_info *mem_partitions;
+ uint8_t num_mem_partitions;
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
@@ -265,6 +301,8 @@ struct amdgpu_gmc {
/* MALL size */
u64 mall_size;
+ uint32_t m_half_use;
+
/* number of UMC instances */
int num_umc;
/* mode2 save restore */
@@ -293,17 +331,22 @@ struct amdgpu_gmc {
u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16];
u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16];
u64 MC_VM_MX_L1_TLB_CNTL;
+
+ u64 noretry_flags;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
-#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
+#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub, inst) \
((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
- ((adev), (pasid), (type), (allhub)))
+ ((adev), (pasid), (type), (allhub), (inst)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
+ (adev)->gmc.gmc_funcs->override_vm_pte_flags \
+ ((adev), (vm), (addr), (pte_flags))
#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
/**
@@ -373,4 +416,7 @@ uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
+int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev);
+
#endif
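A brief, hedged illustration of how the two hooks added above might be exercised by a caller; the guard reflects that override_vm_pte_flags is optional, and my_update_mapping() with its addr/pasid/flags parameters is a placeholder for the VM update and fault paths, not code from this patch.

static int my_update_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     uint64_t addr, uint16_t pasid, uint64_t flags)
{
	/* per-page PTE flag override, only when the GMC implements it */
	if (adev->gmc.gmc_funcs->override_vm_pte_flags)
		amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, &flags);

	/* the PASID TLB flush now also names a GC instance (0 here) */
	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, 0, true, 0);
}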
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 2dadcfe43d03..081267161d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -190,8 +190,8 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
hmm_range->start, hmm_range->end);
- /* Assuming 512MB takes maxmium 1 second to fault page address */
- timeout = max((hmm_range->end - hmm_range->start) >> 29, 1UL);
+ /* Assuming 128MB takes maximum 1 second to fault page address */
+ timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL);
timeout *= HMM_RANGE_DEFAULT_TIMEOUT;
timeout = jiffies + msecs_to_jiffies(timeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 4ff348e10e4d..6aa3b1d845ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@
* Returns 0 on success, error on failure.
*/
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, enum amdgpu_ib_pool_type pool_type,
+ unsigned int size, enum amdgpu_ib_pool_type pool_type,
struct amdgpu_ib *ib)
{
int r;
@@ -123,7 +123,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
-int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_ib *ibs, struct amdgpu_job *job,
struct dma_fence **f)
{
@@ -131,14 +131,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
bool need_ctx_switch;
- unsigned patch_offset = ~0;
+ unsigned int patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
- unsigned fence_flags = 0;
- bool secure;
+ unsigned int fence_flags = 0;
+ bool secure, init_shadow;
+ u64 shadow_va, csa_va, gds_va;
+ int vmid = AMDGPU_JOB_GET_VMID(job);
- unsigned i;
+ unsigned int i;
int r = 0;
bool need_pipe_sync = false;
@@ -150,9 +152,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
vm = job->vm;
fence_ctx = job->base.s_fence ?
job->base.s_fence->scheduled.context : 0;
+ shadow_va = job->shadow_va;
+ csa_va = job->csa_va;
+ gds_va = job->gds_va;
+ init_shadow = job->init_shadow;
} else {
vm = NULL;
fence_ctx = 0;
+ shadow_va = 0;
+ csa_va = 0;
+ gds_va = 0;
+ init_shadow = false;
}
if (!ring->sched.ready && !ring->is_mes_queue) {
@@ -212,7 +222,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
amdgpu_ring_ib_begin(ring);
- if (job && ring->funcs->init_cond_exec)
+
+ if (ring->funcs->emit_gfx_shadow)
+ amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
+ init_shadow, vmid);
+
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
amdgpu_device_flush_hdp(adev, ring);
@@ -263,6 +278,18 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
+ if (ring->funcs->emit_gfx_shadow) {
+ amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
+
+ if (ring->funcs->init_cond_exec) {
+ unsigned int ce_offset = ~0;
+
+ ce_offset = amdgpu_ring_init_cond_exec(ring);
+ if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, ce_offset);
+ }
+ }
+
r = amdgpu_fence_emit(ring, f, job, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -358,7 +385,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
long tmo_gfx, tmo_mm;
int r, ret = 0;
- unsigned i;
+ unsigned int i;
tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
if (amdgpu_sriov_vf(adev)) {
@@ -375,7 +402,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
/* for CP & SDMA engines since they are scheduled together so
* need to make the timeout width enough to cover the time
* cost waiting for it coming back under RUNTIME only
- */
+ */
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
} else if (adev->gmc.xgmi.hive_id) {
tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
@@ -436,15 +463,15 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
- seq_printf(m, "--------------------- DELAYED --------------------- \n");
+ seq_puts(m, "--------------------- DELAYED ---------------------\n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
m);
- seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+ seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
m);
- seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+ seq_puts(m, "--------------------- DIRECT ----------------------\n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index c991ca0b7a1c..ff1ea99292fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !idle)
goto error;
- if (vm->reserved_vmid[vmhub]) {
+ if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
if (r || !id)
goto error;
@@ -460,14 +460,11 @@ error:
}
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub])
- goto unlock;
++id_mgr->reserved_use_count;
if (!id_mgr->reserved) {
@@ -479,27 +476,23 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
list_del_init(&id->list);
id_mgr->reserved = id;
}
- vm->reserved_vmid[vmhub] = true;
-unlock:
mutex_unlock(&id_mgr->lock);
return 0;
}
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub] &&
- !--id_mgr->reserved_use_count) {
+ if (!--id_mgr->reserved_use_count) {
/* give the reserved ID back to normal round robin */
list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
id_mgr->reserved = NULL;
}
- vm->reserved_vmid[vmhub] = false;
+
mutex_unlock(&id_mgr->lock);
}
@@ -578,6 +571,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
+ /* alloc a default reserved vmid to enforce isolation */
+ if (enforce_isolation)
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
+
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index d1cc09b45da4..fa8c42c83d5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -79,11 +79,9 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- unsigned vmhub);
+ unsigned vmhub);
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- unsigned vmhub);
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 1d5af50331e4..f3b0aaf3ebc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -138,6 +138,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
/**
* amdgpu_ih_ring_write - write IV to the ring buffer
*
+ * @adev: amdgpu_device pointer
* @ih: ih ring to write to
* @iv: the iv to write
* @num_dw: size of the iv in dw
@@ -145,8 +146,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
* Writes an IV to the ring buffer using the CPU and increment the wptr.
* Used for testing and delegating IVs to a software ring.
*/
-void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
- unsigned int num_dw)
+void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ const uint32_t *iv, unsigned int num_dw)
{
uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2;
unsigned int i;
@@ -161,6 +162,9 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
if (wptr != READ_ONCE(ih->rptr)) {
wmb();
WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr));
+ } else if (adev->irq.retry_cam_enabled) {
+ dev_warn_once(adev->dev, "IH soft ring buffer overflow 0x%X, 0x%X\n",
+ wptr, ih->rptr);
}
}
@@ -270,7 +274,7 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
entry->timestamp_src = dw[2] >> 31;
entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
+ entry->node_id = (dw[3] >> 16) & 0xff;
entry->src_data[0] = dw[4];
entry->src_data[1] = dw[5];
entry->src_data[2] = dw[6];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index dd1c2eded6b9..6c6184f0dbc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -27,6 +27,9 @@
/* Maximum number of IVs processed at once */
#define AMDGPU_IH_MAX_NUM_IVS 32
+#define IH_RING_SIZE (256 * 1024)
+#define IH_SW_RING_SIZE (8 * 1024) /* enough for 256 CAM entries */
+
struct amdgpu_device;
struct amdgpu_iv_entry;
@@ -97,8 +100,8 @@ struct amdgpu_ih_funcs {
int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
unsigned ring_size, bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
- unsigned int num_dw);
+void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ const uint32_t *iv, unsigned int num_dw);
int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index fafebec5b7b6..fa6d0adcec20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -99,6 +99,21 @@ const char *soc15_ih_clientid_name[] = {
"MP1"
};
+const int node_id_to_phys_map[NODEID_MAX] = {
+ [AID0_NODEID] = 0,
+ [XCD0_NODEID] = 0,
+ [XCD1_NODEID] = 1,
+ [AID1_NODEID] = 1,
+ [XCD2_NODEID] = 2,
+ [XCD3_NODEID] = 3,
+ [AID2_NODEID] = 2,
+ [XCD4_NODEID] = 4,
+ [XCD5_NODEID] = 5,
+ [AID3_NODEID] = 3,
+ [XCD6_NODEID] = 6,
+ [XCD7_NODEID] = 7,
+};
+
/**
* amdgpu_irq_disable_all - disable *all* interrupts
*
@@ -109,7 +124,7 @@ const char *soc15_ih_clientid_name[] = {
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
- unsigned i, j, k;
+ unsigned int i, j, k;
int r;
spin_lock_irqsave(&adev->irq.lock, irqflags);
@@ -124,7 +139,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
continue;
for (k = 0; k < src->num_types; ++k) {
- atomic_set(&src->enabled_types[k], 0);
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
@@ -268,11 +282,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int nvec = pci_msix_vec_count(adev->pdev);
unsigned int flags;
- if (nvec <= 0) {
+ if (nvec <= 0)
flags = PCI_IRQ_MSI;
- } else {
+ else
flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
- }
+
/* we only need one vector */
nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
if (nvec > 0) {
@@ -331,7 +345,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
*/
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
- unsigned i, j;
+ unsigned int i, j;
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
@@ -365,7 +379,7 @@ void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
* 0 on success or error code otherwise
*/
int amdgpu_irq_add_id(struct amdgpu_device *adev,
- unsigned client_id, unsigned src_id,
+ unsigned int client_id, unsigned int src_id,
struct amdgpu_irq_src *source)
{
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
@@ -417,7 +431,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
{
u32 ring_index = ih->rptr >> 2;
struct amdgpu_iv_entry entry;
- unsigned client_id, src_id;
+ unsigned int client_id, src_id;
struct amdgpu_irq_src *src;
bool handled = false;
int r;
@@ -453,7 +467,8 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
handled = true;
} else {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
+ DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ src_id, client_id);
}
/* Send it to amdkfd as well if it isn't already handled */
@@ -478,7 +493,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry,
unsigned int num_dw)
{
- amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
+ amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
schedule_work(&adev->irq.ih_soft_work);
}
@@ -492,7 +507,7 @@ void amdgpu_irq_delegate(struct amdgpu_device *adev,
* Updates interrupt state for the specific source (all ASICs).
*/
int amdgpu_irq_update(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src, unsigned type)
+ struct amdgpu_irq_src *src, unsigned int type)
{
unsigned long irqflags;
enum amdgpu_interrupt_state state;
@@ -501,7 +516,8 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
spin_lock_irqsave(&adev->irq.lock, irqflags);
/* We need to determine after taking the lock, otherwise
- we might disable just enabled interrupts again */
+ * we might disable just enabled interrupts again
+ */
if (amdgpu_irq_enabled(adev, src, type))
state = AMDGPU_IRQ_STATE_ENABLE;
else
@@ -555,7 +571,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
* 0 on success or error code otherwise
*/
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
- unsigned type)
+ unsigned int type)
{
if (!adev->irq.installed)
return -ENOENT;
@@ -585,7 +601,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
* 0 on success or error code otherwise
*/
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
- unsigned type)
+ unsigned int type)
{
if (!adev->irq.installed)
return -ENOENT;
@@ -619,7 +635,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
* invalid parameters
*/
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
- unsigned type)
+ unsigned int type)
{
if (!adev->irq.installed)
return false;
@@ -732,7 +748,7 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
* Returns:
* Linux IRQ
*/
-unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
+unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id)
{
adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index be243adf3e65..04c0b4fa17a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -53,7 +53,7 @@ struct amdgpu_iv_entry {
uint64_t timestamp;
unsigned timestamp_src;
unsigned pasid;
- unsigned pasid_src;
+ unsigned node_id;
unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
const uint32_t *iv_entry;
};
@@ -102,6 +102,24 @@ struct amdgpu_irq {
bool retry_cam_enabled;
};
+enum interrupt_node_id_per_aid {
+ AID0_NODEID = 0,
+ XCD0_NODEID = 1,
+ XCD1_NODEID = 2,
+ AID1_NODEID = 4,
+ XCD2_NODEID = 5,
+ XCD3_NODEID = 6,
+ AID2_NODEID = 8,
+ XCD4_NODEID = 9,
+ XCD5_NODEID = 10,
+ AID3_NODEID = 12,
+ XCD6_NODEID = 13,
+ XCD7_NODEID = 14,
+ NODEID_MAX,
+};
+
+extern const int node_id_to_phys_map[NODEID_MAX];
+
void amdgpu_irq_disable_all(struct amdgpu_device *adev);
int amdgpu_irq_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index c3d9d75143f4..78476bc75b4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -65,6 +65,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
ti.process_name, ti.tgid, ti.task_name, ti.pid);
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -107,7 +109,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
- (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ (*job)->generation = amdgpu_vm_generation(adev, vm);
(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
if (!entity)
@@ -256,16 +258,27 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
struct dma_fence *fence = NULL;
int r;
+ /* Ignore soft recovered fences here */
+ r = drm_sched_entity_error(s_entity);
+ if (r && r != -ENODATA)
+ goto error;
+
if (!fence && job->gang_submit)
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
while (!fence && job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
- if (r)
+ if (r) {
DRM_ERROR("Error getting VM ID (%d)\n", r);
+ goto error;
+ }
}
return fence;
+
+error:
+ dma_fence_set_error(&job->base.s_fence->finished, r);
+ return NULL;
}
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
@@ -282,7 +295,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
/* Skip job if VRAM is lost and never resubmit gangs */
- if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+ if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
(job->job_run_counter && job->gang_submit))
dma_fence_set_error(finished, -ECANCELED);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 52f2e313ea17..a963a25ddd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -61,12 +61,18 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
- uint32_t vram_lost_counter;
+ uint64_t generation;
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
+ /* virtual addresses for shadow/GDS/CSA */
+ uint64_t shadow_va;
+ uint64_t csa_va;
+ uint64_t gds_va;
+ bool init_shadow;
+
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index b07c000fc8ba..2ff2897fd1db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -45,13 +45,14 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
{
- int i;
+ int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
- amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
+ amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
}
mutex_destroy(&adev->jpeg.jpeg_pg_lock);
@@ -76,13 +77,14 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, jpeg.idle_work.work);
unsigned int fences = 0;
- unsigned int i;
+ unsigned int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
- fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
@@ -122,18 +124,21 @@ int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev))
return 0;
- WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
- amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch, 0));
- amdgpu_ring_write(ring, 0xDEADBEEF);
+ WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe], 0xCAFEDEAD);
+ /* Add a read register to make sure the write register is executed. */
+ RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
+
+ amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0));
+ amdgpu_ring_write(ring, 0xABADCAFE);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
- if (tmp == 0xDEADBEEF)
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
+ if (tmp == 0xABADCAFE)
break;
udelay(1);
}
@@ -161,8 +166,7 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
ib = &job->ibs[0];
- ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
- PACKETJ_TYPE0);
+ ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0, 0, PACKETJ_TYPE0);
ib->ptr[1] = 0xDEADBEEF;
for (i = 2; i < 16; i += 2) {
ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
@@ -208,7 +212,7 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch);
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
@@ -241,6 +245,32 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i) ||
+ !adev->jpeg.inst[i].ras_poison_irq.funcs)
+ continue;
+
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ }
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
{
int err;
@@ -262,7 +292,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
adev->jpeg.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
- ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 0ca76f0f23e9..ffe47e9f5bf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -26,19 +26,22 @@
#include "amdgpu_ras.h"
-#define AMDGPU_MAX_JPEG_INSTANCES 2
+#define AMDGPU_MAX_JPEG_INSTANCES 4
+#define AMDGPU_MAX_JPEG_RINGS 8
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
struct amdgpu_jpeg_reg{
- unsigned jpeg_pitch;
+ unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
};
struct amdgpu_jpeg_inst {
- struct amdgpu_ring ring_dec;
+ struct amdgpu_ring ring_dec[AMDGPU_MAX_JPEG_RINGS];
struct amdgpu_irq_src irq;
+ struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_jpeg_reg external;
+ uint8_t aid_id;
};
struct amdgpu_jpeg_ras {
@@ -48,6 +51,7 @@ struct amdgpu_jpeg_ras {
struct amdgpu_jpeg {
uint8_t num_jpeg_inst;
struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
+ unsigned num_jpeg_rings;
struct amdgpu_jpeg_reg internal;
unsigned harvest_config;
struct delayed_work idle_work;
@@ -56,6 +60,9 @@ struct amdgpu_jpeg {
atomic_t total_submission_cnt;
struct ras_common_if *ras_if;
struct amdgpu_jpeg_ras *ras;
+
+ uint16_t inst_mask;
+ uint8_t num_inst_per_aid;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -72,6 +79,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
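The header now allows four JPEG instances with up to eight decode rings each, one pitch register offset per ring, a dedicated RAS poison interrupt source, and AID bookkeeping (aid_id, inst_mask, num_inst_per_aid). Consumers are expected to follow the instance-by-ring loop shape below (a sketch; use_ring() is a placeholder, not a real helper):

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;	/* skip harvested instances */
		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
			use_ring(&adev->jpeg.inst[i].ring_dec[j]);
	}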
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0efb38539d70..99f4df133ed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -462,8 +462,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->jpeg.harvest_config & (1 << i))
continue;
- if (adev->jpeg.inst[i].ring_dec.sched.ready)
- ++num_rings;
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ ++num_rings;
}
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -556,6 +557,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
crtc = (struct drm_crtc *)minfo->crtcs[i];
if (crtc && crtc->base.id == info->mode_crtc.id) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
ui32 = amdgpu_crtc->crtc_id;
found = 1;
break;
@@ -574,7 +576,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (ret)
return ret;
- ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+ ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
return ret ? -EFAULT : 0;
}
case AMDGPU_INFO_HW_IP_COUNT: {
@@ -720,17 +722,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
- unsigned n, alloc_size;
+ unsigned int n, alloc_size;
uint32_t *regs;
- unsigned se_num = (info->read_mmr_reg.instance >>
+ unsigned int se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SE_INDEX_MASK;
- unsigned sh_num = (info->read_mmr_reg.instance >>
+ unsigned int sh_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
/* set full masks if the userspace set all bits
- * in the bitfields */
+ * in the bitfields
+ */
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
else if (se_num >= AMDGPU_GFX_MAX_SE)
@@ -804,7 +807,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
- if (amdgpu_mcbp)
+ if (adev->gfx.mcbp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
@@ -876,13 +879,26 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
dev_info->mall_size = adev->gmc.mall_size;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
+ if (!ret) {
+ dev_info->shadow_size = shadow_info.shadow_size;
+ dev_info->shadow_alignment = shadow_info.shadow_alignment;
+ dev_info->csa_size = shadow_info.csa_size;
+ dev_info->csa_alignment = shadow_info.csa_alignment;
+ }
+ }
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
return ret;
}
case AMDGPU_INFO_VCE_CLOCK_TABLE: {
- unsigned i;
+ unsigned int i;
struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
struct amd_vce_state *vce_state;
@@ -1003,7 +1019,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
/* get average GPU power */
if (amdgpu_dpm_read_sensor(adev,
- AMDGPU_PP_SENSOR_GPU_POWER,
+ AMDGPU_PP_SENSOR_GPU_AVG_POWER,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
@@ -1088,6 +1104,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct drm_amdgpu_info_video_caps *caps;
int r;
+ if (!adev->asic_funcs->query_video_codecs)
+ return -EINVAL;
+
switch (info->video_cap.type) {
case AMDGPU_INFO_VIDEO_CAPS_DECODE:
r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
@@ -1140,6 +1159,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
kfree(caps);
return r;
}
+ case AMDGPU_INFO_MAX_IBS: {
+ uint32_t max_ibs[AMDGPU_HW_IP_NUM];
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+ max_ibs[i] = amdgpu_ring_max_ibs(i);
+
+ return copy_to_user(out, max_ibs,
+ min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1206,7 +1234,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
pasid = 0;
}
- r = amdgpu_vm_init(adev, &fpriv->vm);
+ r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+ if (r)
+ goto error_pasid;
+
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
if (r)
goto error_pasid;
@@ -1220,7 +1252,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto error_vm;
}
- if (amdgpu_mcbp) {
+ if (adev->gfx.mcbp) {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
@@ -1284,12 +1316,12 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
amdgpu_vce_free_handles(adev, file_priv);
- if (amdgpu_mcbp) {
- /* TODO: how to handle reserve failure */
- BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_del(adev, fpriv->csa_va);
+ if (fpriv->csa_va) {
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+ fpriv->csa_va, csa_addr));
fpriv->csa_va = NULL;
- amdgpu_bo_unreserve(adev->virt.csa_obj);
}
pasid = fpriv->vm.pasid;
@@ -1441,7 +1473,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
@@ -1449,7 +1481,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
int ret, i;
static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
-#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
+#define TA_FW_NAME(type)[TA_FW_TYPE_PSP_##type] = #type
TA_FW_NAME(XGMI),
TA_FW_NAME(RAS),
TA_FW_NAME(HDCP),
@@ -1548,7 +1580,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
fw_info.feature, fw_info.ver);
/* RLCV */
- query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
@@ -1692,7 +1724,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
- seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+ seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);
return 0;
}
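Among the amdgpu_kms.c changes above is a new AMDGPU_INFO_MAX_IBS query that returns one 32-bit IB limit per hardware IP. A userspace-side sketch of issuing it through the existing INFO ioctl (assuming libdrm's drmCommandWrite() and the new AMDGPU_INFO_MAX_IBS uapi define from this series; fd is an already-open render node and the include path may need adjusting):

	#include <stdio.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <libdrm/amdgpu_drm.h>

	uint32_t max_ibs[AMDGPU_HW_IP_NUM];
	struct drm_amdgpu_info request = {
		.return_pointer = (uintptr_t)max_ibs,
		.return_size = sizeof(max_ibs),
		.query = AMDGPU_INFO_MAX_IBS,	/* added by this series */
	};

	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)) == 0)
		printf("GFX IB limit: %u\n", max_ibs[AMDGPU_HW_IP_GFX]);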
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index f0f00466b59f..b6015157763a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
+#include <drm/drm_exec.h>
#include "amdgpu_mes.h"
#include "amdgpu.h"
@@ -38,120 +39,70 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
- unsigned int *doorbell_index)
-{
- int r = ida_simple_get(&adev->mes.doorbell_ida, 2,
- adev->mes.max_doorbell_slices,
- GFP_KERNEL);
- if (r > 0)
- *doorbell_index = r;
-
- return r;
-}
-
-void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
- unsigned int doorbell_index)
-{
- if (doorbell_index)
- ida_simple_remove(&adev->mes.doorbell_ida, doorbell_index);
-}
-
-unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
- struct amdgpu_device *adev,
- uint32_t doorbell_index,
- unsigned int doorbell_id)
-{
- return ((doorbell_index *
- amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32) +
- doorbell_id * 2);
-}
-
-static int amdgpu_mes_queue_doorbell_get(struct amdgpu_device *adev,
+static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
struct amdgpu_mes_process *process,
int ip_type, uint64_t *doorbell_index)
{
unsigned int offset, found;
+ struct amdgpu_mes *mes = &adev->mes;
- if (ip_type == AMDGPU_RING_TYPE_SDMA) {
+ if (ip_type == AMDGPU_RING_TYPE_SDMA)
offset = adev->doorbell_index.sdma_engine[0];
- found = find_next_zero_bit(process->doorbell_bitmap,
- AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
- offset);
- } else {
- found = find_first_zero_bit(process->doorbell_bitmap,
- AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS);
- }
+ else
+ offset = 0;
- if (found >= AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS) {
+ found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
+ if (found >= mes->num_mes_dbs) {
DRM_WARN("No doorbell available\n");
return -ENOSPC;
}
- set_bit(found, process->doorbell_bitmap);
-
- *doorbell_index = amdgpu_mes_get_doorbell_dw_offset_in_bar(adev,
- process->doorbell_index, found);
+ set_bit(found, mes->doorbell_bitmap);
+ /* Get the absolute doorbell index on BAR */
+ *doorbell_index = mes->db_start_dw_offset + found * 2;
return 0;
}
-static void amdgpu_mes_queue_doorbell_free(struct amdgpu_device *adev,
+static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
struct amdgpu_mes_process *process,
uint32_t doorbell_index)
{
- unsigned int old, doorbell_id;
+ unsigned int old, rel_index;
+ struct amdgpu_mes *mes = &adev->mes;
- doorbell_id = doorbell_index -
- (process->doorbell_index *
- amdgpu_mes_doorbell_process_slice(adev)) / sizeof(u32);
- doorbell_id /= 2;
-
- old = test_and_clear_bit(doorbell_id, process->doorbell_bitmap);
+ /* Find the relative index of the doorbell in this object */
+ rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
+ old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
WARN_ON(!old);
}
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
- size_t doorbell_start_offset;
- size_t doorbell_aperture_size;
- size_t doorbell_process_limit;
- size_t aggregated_doorbell_start;
int i;
+ struct amdgpu_mes *mes = &adev->mes;
- aggregated_doorbell_start = (adev->doorbell_index.max_assignment + 1) * sizeof(u32);
- aggregated_doorbell_start =
- roundup(aggregated_doorbell_start, PAGE_SIZE);
-
- doorbell_start_offset = aggregated_doorbell_start + PAGE_SIZE;
- doorbell_start_offset =
- roundup(doorbell_start_offset,
- amdgpu_mes_doorbell_process_slice(adev));
-
- doorbell_aperture_size = adev->doorbell.size;
- doorbell_aperture_size =
- rounddown(doorbell_aperture_size,
- amdgpu_mes_doorbell_process_slice(adev));
-
- if (doorbell_aperture_size > doorbell_start_offset)
- doorbell_process_limit =
- (doorbell_aperture_size - doorbell_start_offset) /
- amdgpu_mes_doorbell_process_slice(adev);
- else
- return -ENOSPC;
-
- adev->mes.doorbell_id_offset = doorbell_start_offset / sizeof(u32);
- adev->mes.max_doorbell_slices = doorbell_process_limit;
+ /* Bitmap for dynamic allocation of kernel doorbells */
+ mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
+ if (!mes->doorbell_bitmap) {
+ DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ return -ENOMEM;
+ }
- /* allocate Qword range for aggregated doorbell */
- for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++)
- adev->mes.aggregated_doorbells[i] =
- aggregated_doorbell_start / sizeof(u32) + i * 2;
+ mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
+ for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
+ adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
+ set_bit(i, mes->doorbell_bitmap);
+ }
- DRM_INFO("max_doorbell_slices=%zu\n", doorbell_process_limit);
return 0;
}
+static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
+{
+ bitmap_free(adev->mes.doorbell_bitmap);
+}
+
int amdgpu_mes_init(struct amdgpu_device *adev)
{
int i, r;
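The MES doorbell rework above drops the per-process doorbell slices and the IDA in favour of a single kernel doorbell page tracked by a bitmap: db_start_dw_offset is that page's dword offset in the doorbell BAR and each doorbell occupies two dwords (64 bits). As a worked example with illustrative values: if db_start_dw_offset were 0x800 and bit 3 is the first free slot, the absolute doorbell index becomes 0x800 + 3 * 2 = 0x806 dwords, i.e. byte offset 0x2018 in the BAR; freeing reverses the math, rel_index = (0x806 - 0x800) / 2 = 3.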
@@ -250,6 +201,7 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+ amdgpu_mes_doorbell_free(adev);
idr_destroy(&adev->mes.pasid_idr);
idr_destroy(&adev->mes.gang_id_idr);
@@ -278,15 +230,6 @@ int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
return -ENOMEM;
}
- process->doorbell_bitmap =
- kzalloc(DIV_ROUND_UP(AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
- BITS_PER_BYTE), GFP_KERNEL);
- if (!process->doorbell_bitmap) {
- DRM_ERROR("failed to allocate doorbell bitmap\n");
- kfree(process);
- return -ENOMEM;
- }
-
/* allocate the process context bo and map it */
r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
@@ -313,15 +256,6 @@ int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
goto clean_up_ctx;
}
- /* allocate the starting doorbell index of the process */
- r = amdgpu_mes_alloc_process_doorbells(adev, &process->doorbell_index);
- if (r < 0) {
- DRM_ERROR("failed to allocate doorbell for process\n");
- goto clean_up_pasid;
- }
-
- DRM_DEBUG("process doorbell index = %d\n", process->doorbell_index);
-
INIT_LIST_HEAD(&process->gang_list);
process->vm = vm;
process->pasid = pasid;
@@ -331,15 +265,12 @@ int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
amdgpu_mes_unlock(&adev->mes);
return 0;
-clean_up_pasid:
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
clean_up_ctx:
+ amdgpu_mes_unlock(&adev->mes);
amdgpu_bo_free_kernel(&process->proc_ctx_bo,
&process->proc_ctx_gpu_addr,
&process->proc_ctx_cpu_ptr);
clean_up_memory:
- kfree(process->doorbell_bitmap);
kfree(process);
return r;
}
@@ -385,7 +316,6 @@ void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
}
- amdgpu_mes_free_process_doorbells(adev, process->doorbell_index);
idr_remove(&adev->mes.pasid_idr, pasid);
amdgpu_mes_unlock(&adev->mes);
@@ -407,7 +337,6 @@ void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
amdgpu_bo_free_kernel(&process->proc_ctx_bo,
&process->proc_ctx_gpu_addr,
&process->proc_ctx_cpu_ptr);
- kfree(process->doorbell_bitmap);
kfree(process);
}
@@ -642,6 +571,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
unsigned long flags;
int r;
+ memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
+
/* allocate the mes queue buffer */
queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
if (!queue) {
@@ -679,7 +610,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
*queue_id = queue->queue_id = r;
/* allocate a doorbell index for the queue */
- r = amdgpu_mes_queue_doorbell_get(adev, gang->process,
+ r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
qprops->queue_type,
&qprops->doorbell_off);
if (r)
@@ -737,7 +668,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
return 0;
clean_up_doorbell:
- amdgpu_mes_queue_doorbell_free(adev, gang->process,
+ amdgpu_mes_kernel_doorbell_free(adev, gang->process,
qprops->doorbell_off);
clean_up_queue_id:
spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
@@ -792,7 +723,7 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
queue_id);
list_del(&queue->list);
- amdgpu_mes_queue_doorbell_free(adev, gang->process,
+ amdgpu_mes_kernel_doorbell_free(adev, gang->process,
queue->doorbell_off);
amdgpu_mes_unlock(&adev->mes);
@@ -924,6 +855,43 @@ error:
return r;
}
+int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr,
+ uint32_t spi_gdbg_per_vmid_cntl,
+ const uint32_t *tcp_watch_cntl,
+ uint32_t flags,
+ bool trap_en)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ if (!adev->mes.funcs->misc_op) {
+ DRM_ERROR("mes set shader debugger is not supported!\n");
+ return -EINVAL;
+ }
+
+ op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
+ op_input.set_shader_debugger.process_context_addr = process_context_addr;
+ op_input.set_shader_debugger.flags.u32all = flags;
+ op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
+ memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
+ sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
+
+ if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
+ AMDGPU_MES_API_VERSION_SHIFT) >= 14)
+ op_input.set_shader_debugger.trap_en = trap_en;
+
+ amdgpu_mes_lock(&adev->mes);
+
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ if (r)
+ DRM_ERROR("failed to set_shader_debugger\n");
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return r;
+}
+
static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
@@ -1131,34 +1099,31 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
struct amdgpu_mes_ctx_data *ctx_data)
{
struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
struct amdgpu_sync sync;
+ struct drm_exec exec;
int r;
amdgpu_sync_create(&sync);
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
-
- csa_tv.bo = &ctx_data->meta_data_obj->tbo;
- csa_tv.num_shared = 1;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve meta data BO: err=%d\n", r);
- return r;
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec,
+ &ctx_data->meta_data_obj->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_fini_exec;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_fini_exec;
}
bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
if (!bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("failed to create bo_va for meta data BO\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto error_fini_exec;
}
r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
@@ -1168,33 +1133,35 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
if (r) {
DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error;
+ goto error_del_bo_va;
}
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r) {
DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error;
+ goto error_del_bo_va;
}
amdgpu_sync_fence(&sync, bo_va->last_pt_update);
r = amdgpu_vm_update_pdes(adev, vm, false);
if (r) {
DRM_ERROR("failed to update pdes on meta data\n");
- goto error;
+ goto error_del_bo_va;
}
amdgpu_sync_fence(&sync, vm->last_update);
amdgpu_sync_wait(&sync, false);
- ttm_eu_backoff_reservation(&ticket, &list);
+ drm_exec_fini(&exec);
amdgpu_sync_free(&sync);
ctx_data->meta_data_va = bo_va;
return 0;
-error:
+error_del_bo_va:
amdgpu_vm_bo_del(adev, bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
+
+error_fini_exec:
+ drm_exec_fini(&exec);
amdgpu_sync_free(&sync);
return r;
}
@@ -1205,34 +1172,30 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
struct amdgpu_bo *bo = ctx_data->meta_data_obj;
struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head list, duplicates;
- struct dma_fence *fence = NULL;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- long r = 0;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
-
- tv.bo = &bo->tbo;
- tv.num_shared = 2;
- list_add(&tv.head, &list);
-
- amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
- if (r) {
- dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%ld)\n", r);
- return r;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ long r;
+
+ drm_exec_init(&exec, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec,
+ &ctx_data->meta_data_obj->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
}
amdgpu_vm_bo_del(adev, bo_va);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+ r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ &fence);
if (r)
goto out_unlock;
if (fence) {
@@ -1251,7 +1214,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
out_unlock:
if (unlikely(r < 0))
dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- ttm_eu_backoff_reservation(&ticket, &list);
+ drm_exec_fini(&exec);
return r;
}
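Both the meta-data map and unmap paths above move from the ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() pattern to drm_exec. The locking shape they now share, reduced to a sketch (names taken from the diff, the separate error labels collapsed into a single out label):

	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, &bo->tbo.base);	/* the meta data BO */
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);		/* plus the page directory */
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}
	/* everything is locked here: add/remove the bo_va, update PDEs, sync */
out:
	drm_exec_fini(&exec);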
@@ -1305,14 +1268,9 @@ static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
if (!ring)
continue;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s test failed (%d)\n",
- ring->name, r);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- } else
- DRM_INFO("ring %s test pass\n", ring->name);
r = amdgpu_ring_test_ib(ring, 1000 * 10);
if (r) {
@@ -1350,7 +1308,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
goto error_pasid;
}
- r = amdgpu_vm_init(adev, vm);
+ r = amdgpu_vm_init(adev, vm, -1);
if (r) {
DRM_ERROR("failed to initialize vm\n");
goto error_pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 547ec35691fa..a27b424ffe00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -27,6 +27,7 @@
#include "amdgpu_irq.h"
#include "kgd_kfd_interface.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_doorbell.h"
#include <linux/sched/mm.h>
#define AMDGPU_MES_MAX_COMPUTE_PIPES 8
@@ -76,7 +77,6 @@ struct amdgpu_mes {
uint32_t kiq_version;
uint32_t total_max_queue;
- uint32_t doorbell_id_offset;
uint32_t max_doorbell_slices;
uint64_t default_process_quantum;
@@ -128,6 +128,11 @@ struct amdgpu_mes {
int (*kiq_hw_init)(struct amdgpu_device *adev);
int (*kiq_hw_fini)(struct amdgpu_device *adev);
+ /* MES doorbells */
+ uint32_t db_start_dw_offset;
+ uint32_t num_mes_dbs;
+ unsigned long *doorbell_bitmap;
+
/* ip specific functions */
const struct amdgpu_mes_funcs *funcs;
};
@@ -142,7 +147,6 @@ struct amdgpu_mes_process {
uint64_t process_quantum;
struct list_head gang_list;
uint32_t doorbell_index;
- unsigned long *doorbell_bitmap;
struct mutex doorbell_lock;
};
@@ -219,9 +223,12 @@ struct mes_add_queue_input {
uint32_t gws_size;
uint64_t tba_addr;
uint64_t tma_addr;
+ uint32_t trap_en;
+ uint32_t skip_process_ctx_clear;
uint32_t is_kfd_process;
uint32_t is_aql_queue;
uint32_t queue_size;
+ uint32_t exclusively_scheduled;
};
struct mes_remove_queue_input {
@@ -256,6 +263,7 @@ enum mes_misc_opcode {
MES_MISC_OP_READ_REG,
MES_MISC_OP_WRM_REG_WAIT,
MES_MISC_OP_WRM_REG_WR_WAIT,
+ MES_MISC_OP_SET_SHADER_DEBUGGER,
};
struct mes_misc_op_input {
@@ -278,6 +286,21 @@ struct mes_misc_op_input {
uint32_t reg0;
uint32_t reg1;
} wrm_reg;
+
+ struct {
+ uint64_t process_context_addr;
+ union {
+ struct {
+ uint64_t single_memop : 1;
+ uint64_t single_alu_op : 1;
+ uint64_t reserved: 30;
+ };
+ uint32_t u32all;
+ } flags;
+ uint32_t spi_gdbg_per_vmid_cntl;
+ uint32_t tcp_watch_cntl[4];
+ uint32_t trap_en;
+ } set_shader_debugger;
};
};
@@ -340,6 +363,12 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
+int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr,
+ uint32_t spi_gdbg_per_vmid_cntl,
+ const uint32_t *tcp_watch_cntl,
+ uint32_t flags,
+ bool trap_en);
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
int queue_type, int idx,
@@ -362,14 +391,6 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
int amdgpu_mes_self_test(struct amdgpu_device *adev);
-int amdgpu_mes_alloc_process_doorbells(struct amdgpu_device *adev,
- unsigned int *doorbell_index);
-void amdgpu_mes_free_process_doorbells(struct amdgpu_device *adev,
- unsigned int doorbell_index);
-unsigned int amdgpu_mes_get_doorbell_dw_offset_in_bar(
- struct amdgpu_device *adev,
- uint32_t doorbell_index,
- unsigned int doorbell_id);
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index d21bb6dae56e..1ca9d4ed8063 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -21,6 +21,29 @@
#ifndef __AMDGPU_MMHUB_H__
#define __AMDGPU_MMHUB_H__
+enum amdgpu_mmhub_ras_memory_id {
+ AMDGPU_MMHUB_WGMI_PAGEMEM = 0,
+ AMDGPU_MMHUB_RGMI_PAGEMEM = 1,
+ AMDGPU_MMHUB_WDRAM_PAGEMEM = 2,
+ AMDGPU_MMHUB_RDRAM_PAGEMEM = 3,
+ AMDGPU_MMHUB_WIO_CMDMEM = 4,
+ AMDGPU_MMHUB_RIO_CMDMEM = 5,
+ AMDGPU_MMHUB_WGMI_CMDMEM = 6,
+ AMDGPU_MMHUB_RGMI_CMDMEM = 7,
+ AMDGPU_MMHUB_WDRAM_CMDMEM = 8,
+ AMDGPU_MMHUB_RDRAM_CMDMEM = 9,
+ AMDGPU_MMHUB_MAM_DMEM0 = 10,
+ AMDGPU_MMHUB_MAM_DMEM1 = 11,
+ AMDGPU_MMHUB_MAM_DMEM2 = 12,
+ AMDGPU_MMHUB_MAM_DMEM3 = 13,
+ AMDGPU_MMHUB_WRET_TAGMEM = 19,
+ AMDGPU_MMHUB_RRET_TAGMEM = 20,
+ AMDGPU_MMHUB_WIO_DATAMEM = 21,
+ AMDGPU_MMHUB_WGMI_DATAMEM = 22,
+ AMDGPU_MMHUB_WDRAM_DATAMEM = 23,
+ AMDGPU_MMHUB_MEMORY_BLOCK_LAST,
+};
+
struct amdgpu_mmhub_ras {
struct amdgpu_ras_block_object ras_block;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index a3bc00577a7c..51ca544a7094 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -45,6 +45,22 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ if (adev->nbio.funcs && adev->nbio.funcs->get_pcie_replay_count)
+ return adev->nbio.funcs->get_pcie_replay_count(adev);
+
+ return 0;
+}
+
+void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ if (adev->nbio.funcs->get_pcie_usage)
+ adev->nbio.funcs->get_pcie_usage(adev, count0, count1);
+
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
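amdgpu_nbio.c gains thin wrappers so IP-independent code can query the PCIe replay count and PCIe usage without knowing which NBIO backend is active; the wrappers simply guard the optional callbacks and fall back to zero or a no-op. An illustrative call site (assumed, not part of the patch):

	u64 replays;
	uint64_t count0, count1;

	/* callers no longer need to test adev->nbio.funcs themselves */
	replays = amdgpu_nbio_get_pcie_replay_count(adev);
	amdgpu_nbio_get_pcie_usage(adev, &count0, &count1);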
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index c686ff4bcc39..6cf7a8829a52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -61,6 +61,7 @@ struct amdgpu_nbio_funcs {
u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_hi_offset)(struct amdgpu_device *adev);
u32 (*get_pcie_port_index_offset)(struct amdgpu_device *adev);
u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
u32 (*get_rev_id)(struct amdgpu_device *adev);
@@ -95,6 +96,12 @@ struct amdgpu_nbio_funcs {
void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
void (*clear_doorbell_interrupt)(struct amdgpu_device *adev);
u32 (*get_rom_offset)(struct amdgpu_device *adev);
+ int (*get_compute_partition_mode)(struct amdgpu_device *adev);
+ u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
+ u32 *supp_modes);
+ u64 (*get_pcie_replay_count)(struct amdgpu_device *adev);
+ void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1);
};
struct amdgpu_nbio {
@@ -107,5 +114,8 @@ struct amdgpu_nbio {
};
int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 2bd1a54ee866..ace837cfa0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
struct amdgpu_bo_vm *vmbo;
+ bo = shadow_bo->parent;
vmbo = to_amdgpu_bo_vm(bo);
/* in case amdgpu_device_recover_vram got NULL of bo->parent */
if (!list_empty(&vmbo->shadow_list)) {
@@ -130,16 +131,26 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
-
- places[c].fpfn = 0;
- places[c].lpfn = 0;
+ unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+ int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
+
+ if (adev->gmc.mem_partitions && mem_id >= 0) {
+ places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
+ /*
+ * memory partition range lpfn is inclusive start + size - 1
+ * TTM place lpfn is exclusive start + size
+ */
+ places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
+ } else {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ }
places[c].mem_type = TTM_PL_VRAM;
places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- places[c].lpfn = visible_pfn;
- else if (adev->gmc.real_vram_size != adev->gmc.visible_vram_size)
+ places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
+ else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
@@ -147,6 +158,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_DOORBELL;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -466,7 +485,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
goto fail;
}
- /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
+ /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
return true;
fail:
@@ -574,6 +593,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bo->flags = bp->flags;
+ if (adev->gmc.mem_partitions)
+ /* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
+ bo->xcp_id = bp->xcp_id_plus1 - 1;
+ else
+ /* For GPUs without spatial partitioning */
+ bo->xcp_id = 0;
+
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -610,7 +636,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
if (unlikely(r))
goto fail_unreserve;
@@ -694,11 +720,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
- INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
- /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
- * is initialized.
- */
- bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
@@ -715,6 +736,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+ vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
+ vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
mutex_unlock(&adev->shadow_list_lock);
}
@@ -935,7 +958,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- unsigned fpfn, lpfn;
+ unsigned int fpfn, lpfn;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
@@ -1014,9 +1037,10 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
+
}
-static const char *amdgpu_vram_names[] = {
+static const char * const amdgpu_vram_names[] = {
"UNKNOWN",
"GDDR1",
"DDR2",
@@ -1044,7 +1068,7 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* On A+A platform, VRAM can be mapped as WB */
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
/* reserve PAT memory space to WC for VRAM */
int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->gmc.aper_size);
@@ -1080,8 +1104,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
amdgpu_ttm_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
-
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
arch_phys_wc_del(adev->gmc.vram_mtrr);
arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
@@ -1148,8 +1171,8 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
- uint32_t metadata_size, uint64_t flags)
+int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
+ u32 metadata_size, uint64_t flags)
{
struct amdgpu_bo_user *ubo;
void *buffer;
@@ -1268,8 +1291,12 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
- unsigned int domain;
uint64_t size = amdgpu_bo_size(bo);
+ unsigned int domain;
+
+ /* Abort if the BO doesn't currently have a backing store */
+ if (!bo->tbo.resource)
+ return;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
@@ -1338,7 +1365,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
return;
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
+ r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
if (!WARN_ON(r)) {
amdgpu_bo_fence(abo, fence, false);
dma_fence_put(fence);
@@ -1557,23 +1584,31 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
- unsigned int domain;
const char *placement;
unsigned int pin_count;
u64 size;
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
- switch (domain) {
- case AMDGPU_GEM_DOMAIN_VRAM:
- placement = "VRAM";
- break;
- case AMDGPU_GEM_DOMAIN_GTT:
- placement = " GTT";
- break;
- case AMDGPU_GEM_DOMAIN_CPU:
- default:
- placement = " CPU";
- break;
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ unsigned int domain;
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ if (amdgpu_bo_in_cpu_visible_vram(bo))
+ placement = "VRAM VISIBLE";
+ else
+ placement = "VRAM";
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ placement = "GTT";
+ break;
+ case AMDGPU_GEM_DOMAIN_CPU:
+ default:
+ placement = "CPU";
+ break;
+ }
+ dma_resv_unlock(bo->tbo.base.resv);
+ } else {
+ placement = "UNKNOWN";
}
size = amdgpu_bo_size(bo);
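Two themes run through the amdgpu_object.c hunks: BO placement becomes memory-partition aware (a BO bound to partition mem_id is clamped to that partition's pfn range, converting the partition's inclusive lpfn into TTM's exclusive lpfn), and the shadow-BO destroy callback is now installed only when the shadow is added to the shadow list, after shadow->parent has been set. A worked placement example with illustrative numbers: a partition spanning fpfn 0x0 to lpfn 0x3ffff inclusive yields places[c].fpfn = 0x0 and places[c].lpfn = 0x40000, and AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED further clamps lpfn with min_not_zero() against the visible-VRAM pfn.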
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 35b8106816a1..f3ee83cdf97e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -56,6 +56,8 @@ struct amdgpu_bo_param {
bool no_wait_gpu;
struct dma_resv *resv;
void (*destroy)(struct ttm_buffer_object *bo);
+ /* xcp partition number plus 1, 0 means any partition */
+ int8_t xcp_id_plus1;
};
/* bo virtual addresses in a vm */
@@ -108,6 +110,13 @@ struct amdgpu_bo {
struct mmu_interval_notifier notifier;
#endif
struct kgd_mem *kfd_bo;
+
+ /*
+ * For GPUs with spatial partitioning, xcp partition number, -1 means
+ * any partition. For other ASICs without spatial partition, always 0
+ * for memory accounting.
+ */
+ int8_t xcp_id;
};
struct amdgpu_bo_user {
@@ -173,6 +182,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_GWS;
case AMDGPU_PL_OA:
return AMDGPU_GEM_DOMAIN_OA;
+ case AMDGPU_PL_DOORBELL:
+ return AMDGPU_GEM_DOMAIN_DOORBELL;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 71ee361d0972..6e91ea1de5aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -276,9 +276,8 @@ static void amdgpu_perf_read(struct perf_event *event)
(!pe->adev->df.funcs->pmc_get_count))
return;
+ prev = local64_read(&hwc->prev_count);
do {
- prev = local64_read(&hwc->prev_count);
-
switch (hwc->config_base) {
case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
@@ -289,7 +288,7 @@ static void amdgpu_perf_read(struct perf_event *event)
count = 0;
break;
}
- } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);
+ } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, count));
local64_add(count - prev, &event->count);
}
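The perf read path above is converted from an open-coded cmpxchg loop to local64_try_cmpxchg(): prev is loaded once before the loop, and try_cmpxchg refreshes it automatically whenever another updater wins the race. The generic shape, as a sketch (read_hw_counter() stands in for the df pmc_get_count call and is not a real symbol):

	s64 prev, count;

	prev = local64_read(&hwc->prev_count);
	do {
		count = read_hw_counter();	/* placeholder for pmc_get_count() */
	} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, count));

	/* delta since the last successfully published read */
	local64_add(count - prev, &event->count);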
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9d7e6e0e73ed..8fdca54bb8a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -45,9 +45,6 @@
#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
-static int psp_sysfs_init(struct amdgpu_device *adev);
-static void psp_sysfs_fini(struct amdgpu_device *adev);
-
static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);
@@ -146,6 +143,10 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 0):
adev->virt.autoload_ucode_id = 0;
break;
+ case IP_VERSION(13, 0, 6):
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ ret &= psp_init_ta_microcode(psp, ucode_prefix);
+ break;
case IP_VERSION(13, 0, 10):
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
ret = psp_init_cap_microcode(psp, ucode_prefix);
@@ -177,9 +178,11 @@ static int psp_early_init(void *handle)
psp->autoload_supported = false;
break;
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 7):
+ adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
+ fallthrough;
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 0, 12):
@@ -199,8 +202,8 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
- case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
@@ -212,8 +215,10 @@ static int psp_early_init(void *handle)
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
break;
case IP_VERSION(13, 0, 4):
psp_v13_0_4_set_psp_funcs(psp);
@@ -329,6 +334,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
bool ret = false;
int i;
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6))
+ return false;
+
db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
@@ -411,7 +419,7 @@ static int psp_sw_init(void *handle)
if ((psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
&scpm_entry)) &&
- (SCPM_DISABLE != scpm_entry.scpm_status)) {
+ (scpm_entry.scpm_status != SCPM_DISABLE)) {
adev->scpm_enabled = true;
adev->scpm_status = scpm_entry.scpm_status;
} else {
@@ -431,14 +439,15 @@ static int psp_sw_init(void *handle)
/* If psp runtime database exists, then
* only enable two stage memory training
* when TWO_STAGE_DRAM_TRAINING bit is set
- * in runtime database */
+ * in runtime database
+ */
mem_training_ctx->enable_mem_training = true;
}
} else {
- /* If psp runtime database doesn't exist or
- * is invalid, force enable two stage memory
- * training */
+ /* If psp runtime database doesn't exist or is
+ * invalid, force enable two stage memory training
+ */
mem_training_ctx->enable_mem_training = true;
}
@@ -456,14 +465,6 @@ static int psp_sw_init(void *handle)
}
}
- if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
- adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) {
- ret= psp_sysfs_init(adev);
- if (ret) {
- return ret;
- }
- }
-
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
amdgpu_sriov_vf(adev) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
@@ -474,7 +475,8 @@ static int psp_sw_init(void *handle)
return ret;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&psp->fence_buf_bo,
&psp->fence_buf_mc_addr,
&psp->fence_buf);
@@ -482,7 +484,8 @@ static int psp_sw_init(void *handle)
goto failed1;
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
if (ret)
@@ -491,11 +494,11 @@ static int psp_sw_init(void *handle)
return 0;
failed2:
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-failed1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
+failed1:
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
return ret;
}
@@ -513,13 +516,11 @@ static int psp_sw_fini(void *handle)
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);
- if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
- adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
- psp_sysfs_fini(adev);
-
kfree(cmd);
cmd = NULL;
+ psp_free_shared_bufs(psp);
+
if (psp->km_ring.ring_mem)
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&psp->km_ring.ring_mem_mc_addr,
@@ -560,6 +561,26 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
return -ETIME;
}
+int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
+ uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
+{
+ uint32_t val;
+ int i;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp->adev->no_hw_access)
+ return 0;
+
+ for (i = 0; i < msec_timeout; i++) {
+ val = RREG32(reg_index);
+ if ((val & mask) == reg_val)
+ return 0;
+ msleep(1);
+ }
+
+ return -ETIME;
+}
+
static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
switch (cmd_id) {
@@ -643,7 +664,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
- memcpy((void*)&cmd->resp, (void*)&psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
+ memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
@@ -699,8 +720,13 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
struct amdgpu_device *adev = psp->adev;
- uint32_t size = amdgpu_bo_size(tmr_bo);
- uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
+ uint32_t size = 0;
+ uint64_t tmr_pa = 0;
+
+ if (tmr_bo) {
+ size = amdgpu_bo_size(tmr_bo);
+ tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
+ }
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
@@ -745,6 +771,16 @@ static int psp_load_toc(struct psp_context *psp,
return ret;
}
+static bool psp_boottime_tmr(struct psp_context *psp)
+{
+ switch (psp->adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 6):
+ return true;
+ default:
+ return false;
+ }
+}
+
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
@@ -763,7 +799,8 @@ static int psp_tmr_init(struct psp_context *psp)
tmr_size = PSP_TMR_SIZE(psp->adev);
/* For ASICs support RLC autoload, psp will parse the toc
- * and calculate the total size of TMR needed */
+ * and calculate the total size of TMR needed
+ */
if (!amdgpu_sriov_vf(psp->adev) &&
psp->toc.start_addr &&
psp->toc.size_bytes &&
@@ -795,6 +832,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
default:
@@ -816,8 +854,9 @@ static int psp_tmr_load(struct psp_context *psp)
cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
- DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
- amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
+ if (psp->tmr_bo)
+ DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
+ amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -828,7 +867,7 @@ static int psp_tmr_load(struct psp_context *psp)
}
static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
- struct psp_gfx_cmd_resp *cmd)
+ struct psp_gfx_cmd_resp *cmd)
{
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
@@ -969,6 +1008,27 @@ static int psp_rl_load(struct amdgpu_device *adev)
return ret;
}
+int psp_spatial_partition(struct psp_context *psp, int mode)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
+ cmd->cmd.cmd_spatial_part.mode = mode;
+
+ dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
static int psp_asd_initialize(struct psp_context *psp)
{
int ret;
@@ -1065,7 +1125,7 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
struct ta_context *context)
{
cmd->cmd_id = context->ta_load_type;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
@@ -1080,9 +1140,9 @@ int psp_ta_init_shared_buf(struct psp_context *psp,
struct ta_mem_context *mem_ctx)
{
/*
- * Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for ta to host memory
- */
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for ta to host memory
+ */
return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -1136,9 +1196,8 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
context->resp_status = cmd->resp.status;
- if (!ret) {
+ if (!ret)
context->session_id = cmd->resp.session_id;
- }
release_psp_cmd_buf(psp);
@@ -1254,8 +1313,9 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
- return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
- psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b;
+ return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
+ psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6);
}
/*
@@ -1363,6 +1423,9 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
/* Invoke xgmi ta again to get the link information */
if (psp_xgmi_peer_link_info_supported(psp)) {
struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;
+ bool requires_reflection =
+ (psp->xgmi_context.supports_extended_data && get_extended_data) ||
+ psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6);
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
@@ -1377,11 +1440,11 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
topology->nodes[i].num_links = get_extended_data ?
topology->nodes[i].num_links +
link_info_output->nodes[i].num_links :
- link_info_output->nodes[i].num_links;
+ ((requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links :
+ link_info_output->nodes[i].num_links);
/* reflect the topology information for bi-directionality */
- if (psp->xgmi_context.supports_extended_data &&
- get_extended_data && topology->nodes[i].num_hops)
+ if (requires_reflection && topology->nodes[i].num_hops)
psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
}
}
@@ -1465,8 +1528,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_ras_intr_triggered())
return ret;
- if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
- {
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
DRM_WARN("RAS: Unsupported Interface");
return -EINVAL;
}
@@ -1476,8 +1538,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
dev_warn(psp->adev->dev, "ECC switch disabled\n");
ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
- }
- else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
dev_warn(psp->adev->dev,
"RAS internal register access blocked\n");
@@ -1573,11 +1634,10 @@ int psp_ras_initialize(struct psp_context *psp)
if (ret)
dev_warn(adev->dev, "PSP set boot config failed\n");
else
- dev_warn(adev->dev, "GECC will be disabled in next boot cycle "
- "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
}
} else {
- if (1 == boot_cfg) {
+ if (boot_cfg == 1) {
dev_info(adev->dev, "GECC is enabled\n");
} else {
/* enable GECC in next boot cycle if it is disabled
@@ -1607,8 +1667,11 @@ int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_ras_is_poison_mode_supported(adev))
ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
- if (!adev->gmc.xgmi.connected_to_cpu)
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
+ ras_cmd->ras_in_message.init_flags.xcc_mask =
+ adev->gfx.xcc_mask;
+ ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
ret = psp_ta_load(psp, &psp->ras_context.context);
@@ -1626,14 +1689,37 @@ int psp_ras_initialize(struct psp_context *psp)
}
int psp_ras_trigger_error(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info)
+ struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
struct ta_ras_shared_memory *ras_cmd;
+ struct amdgpu_device *adev = psp->adev;
int ret;
+ uint32_t dev_mask;
if (!psp->ras_context.context.initialized)
return -EINVAL;
+ switch (info->block_id) {
+ case TA_RAS_BLOCK__GFX:
+ dev_mask = GET_MASK(GC, instance_mask);
+ break;
+ case TA_RAS_BLOCK__SDMA:
+ dev_mask = GET_MASK(SDMA0, instance_mask);
+ break;
+ case TA_RAS_BLOCK__VCN:
+ case TA_RAS_BLOCK__JPEG:
+ dev_mask = GET_MASK(VCN, instance_mask);
+ break;
+ default:
+ dev_mask = instance_mask;
+ break;
+ }
+
+ /* reuse sub_block_index for backward compatibility */
+ dev_mask <<= AMDGPU_RAS_INST_SHIFT;
+ dev_mask &= AMDGPU_RAS_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
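psp_ras_trigger_error() now takes an instance_mask: the per-IP mask is translated to a device mask (GC, SDMA0 or VCN lookups via GET_MASK()) and folded into the upper bits of sub_block_index, bounded by AMDGPU_RAS_INST_MASK, so older TA interfaces keep seeing the original sub-block value in the low bits. For example, injecting on the first two GFX instances would pass instance_mask = 0x3, which GET_MASK(GC, 0x3) is expected to translate to the corresponding physical instance mask before the shift.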
@@ -1645,7 +1731,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
return -EINVAL;
/* If err_event_athub occurs error inject was successful, however
- return status from TA is no long reliable */
+	 * return status from TA is no longer reliable
+ */
if (amdgpu_ras_intr_triggered())
return 0;
@@ -1947,6 +2034,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
}
return 0;
@@ -2077,10 +2166,12 @@ static int psp_hw_start(struct psp_context *psp)
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
- ret = psp_tmr_init(psp);
- if (ret) {
- DRM_ERROR("PSP tmr init failed!\n");
- return ret;
+ if (!psp_boottime_tmr(psp)) {
+ ret = psp_tmr_init(psp);
+ if (ret) {
+ DRM_ERROR("PSP tmr init failed!\n");
+ return ret;
+ }
}
skip_pin_bo:
@@ -2362,8 +2453,8 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
return ret;
}
-static int psp_execute_non_psp_fw_load(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode)
+int psp_execute_ip_fw_load(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
{
int ret = 0;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
@@ -2402,12 +2493,11 @@ static int psp_load_smu_fw(struct psp_context *psp)
(adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
- if (ret) {
+ if (ret)
DRM_WARN("Failed to set MP1 state prepare for reload\n");
- }
}
- ret = psp_execute_non_psp_fw_load(psp, ucode);
+ ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
DRM_ERROR("PSP load smu failed!\n");
@@ -2449,7 +2539,7 @@ int psp_load_fw_list(struct psp_context *psp,
for (i = 0; i < ucode_count; ++i) {
ucode = ucode_list[i];
psp_print_fw_hdr(psp, ucode);
- ret = psp_execute_non_psp_fw_load(psp, ucode);
+ ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
return ret;
}
@@ -2491,12 +2581,13 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
/* PSP only receive one SDMA fw for sienna_cichlid,
- * as all four sdma fw are same */
+ * as all four sdma fw are same
+ */
continue;
psp_print_fw_hdr(psp, ucode);
- ret = psp_execute_non_psp_fw_load(psp, ucode);
+ ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
return ret;
@@ -2556,8 +2647,8 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp, false, true);
/* Warning the XGMI seesion initialize failure
- * Instead of stop driver initialization
- */
+ * Instead of stop driver initialization
+ */
if (ret)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
@@ -2655,8 +2746,6 @@ static int psp_hw_fini(void *handle)
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- psp_free_shared_bufs(psp);
-
return 0;
}
@@ -2716,9 +2805,8 @@ static int psp_suspend(void *handle)
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
- if (ret) {
+ if (ret)
DRM_ERROR("PSP ring stop failed\n");
- }
out:
return ret;
@@ -2838,19 +2926,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
return ret;
}
-int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
- uint64_t cmd_gpu_addr, int cmd_size)
-{
- struct amdgpu_firmware_info ucode = {0};
-
- ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
- AMDGPU_UCODE_ID_VCN0_RAM;
- ucode.mc_addr = cmd_gpu_addr;
- ucode.ucode_size = cmd_size;
-
- return psp_execute_non_psp_fw_load(&adev->psp, &ucode);
-}
-
int psp_ring_cmd_submit(struct psp_context *psp,
uint64_t cmd_buf_mc_addr,
uint64_t fence_mc_addr,
@@ -2967,7 +3042,7 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->sos.fw_version = le32_to_cpu(desc->fw_version);
psp->sos.feature_version = le32_to_cpu(desc->fw_version);
psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->sos.start_addr = ucode_start_addr;
+ psp->sos.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_SYS_DRV:
psp->sys.fw_version = le32_to_cpu(desc->fw_version);
@@ -3491,7 +3566,12 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
drm_dev_exit(idx);
}
-static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+/**
+ * DOC: usbc_pd_fw
+ * Reading from this file will retrieve the USB-C PD firmware version. Writing to
+ * this file will trigger the update process.
+ */
+static DEVICE_ATTR(usbc_pd_fw, 0644,
psp_usbc_pd_fw_sysfs_read,
psp_usbc_pd_fw_sysfs_write);
@@ -3531,7 +3611,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
adev->psp.vbflash_image_size += count;
mutex_unlock(&adev->psp.mutex);
- dev_info(adev->dev, "VBIOS flash write PSP done");
+ dev_dbg(adev->dev, "IFWI staged for update");
return count;
}
@@ -3548,7 +3628,10 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
void *fw_pri_cpu_addr;
int ret;
- dev_info(adev->dev, "VBIOS flash to PSP started");
+ if (adev->psp.vbflash_image_size == 0)
+ return -EINVAL;
+
+ dev_dbg(adev->dev, "PSP IFWI flash process initiated");
ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
AMDGPU_GPU_PAGE_SIZE,
@@ -3573,14 +3656,32 @@ rel_buf:
adev->psp.vbflash_image_size = 0;
if (ret) {
- dev_err(adev->dev, "Failed to load VBIOS FW, err = %d", ret);
+ dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
return ret;
}
- dev_info(adev->dev, "VBIOS flash to PSP done");
+ dev_dbg(adev->dev, "PSP IFWI flash process done");
return 0;
}
+/**
+ * DOC: psp_vbflash
+ * Writing to this file will stage an IFWI for update. Reading from this file
+ * will trigger the update process.
+ */
+static struct bin_attribute psp_vbflash_bin_attr = {
+ .attr = {.name = "psp_vbflash", .mode = 0660},
+ .size = 0,
+ .write = amdgpu_psp_vbflash_write,
+ .read = amdgpu_psp_vbflash_read,
+};
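A rough usage sketch of the psp_vbflash flow documented above; the PCI device path, image name and reported status value are assumptions for illustration only:

    # Stage the IFWI image (write side of psp_vbflash).
    cat ifwi.bin > /sys/bus/pci/devices/0000:03:00.0/psp_vbflash
    # Reading triggers the actual flash of the staged image.
    cat /sys/bus/pci/devices/0000:03:00.0/psp_vbflash > /dev/null
    # psp_vbflash_status should then report 0x1 once the flash is complete.
    cat /sys/bus/pci/devices/0000:03:00.0/psp_vbflash_status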
+
+/**
+ * DOC: psp_vbflash_status
+ * The status of the flash process.
+ * 0: IFWI flash not complete.
+ * 1: IFWI flash complete.
+ */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3597,43 +3698,49 @@ static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
+static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
-static const struct bin_attribute psp_vbflash_bin_attr = {
- .attr = {.name = "psp_vbflash", .mode = 0664},
- .size = 0,
- .write = amdgpu_psp_vbflash_write,
- .read = amdgpu_psp_vbflash_read,
+static struct bin_attribute *bin_flash_attrs[] = {
+ &psp_vbflash_bin_attr,
+ NULL
};
-static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL);
+static struct attribute *flash_attrs[] = {
+ &dev_attr_psp_vbflash_status.attr,
+ &dev_attr_usbc_pd_fw.attr,
+ NULL
+};
-int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
+static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
- int ret = 0;
- struct psp_context *psp = &adev->psp;
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (attr == &dev_attr_usbc_pd_fw.attr)
+ return adev->psp.sup_pd_fw_up ? 0660 : 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 0):
- case IP_VERSION(13, 0, 7):
- if (!psp->adev) {
- psp->adev = adev;
- psp_v13_0_set_psp_funcs(psp);
- }
- ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr);
- if (ret)
- dev_err(adev->dev, "Failed to create device file psp_vbflash");
- ret = device_create_file(adev->dev, &dev_attr_psp_vbflash_status);
- if (ret)
- dev_err(adev->dev, "Failed to create device file psp_vbflash_status");
- return ret;
- default:
- return 0;
- }
+ return adev->psp.sup_ifwi_up ? 0440 : 0;
}
+static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return adev->psp.sup_ifwi_up ? 0660 : 0;
+}
+
+const struct attribute_group amdgpu_flash_attr_group = {
+ .attrs = flash_attrs,
+ .bin_attrs = bin_flash_attrs,
+ .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
+ .is_visible = amdgpu_flash_attr_is_visible,
+};
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
@@ -3652,29 +3759,7 @@ const struct amd_ip_funcs psp_ip_funcs = {
.set_powergating_state = psp_set_powergating_state,
};
-static int psp_sysfs_init(struct amdgpu_device *adev)
-{
- int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
-
- if (ret)
- DRM_ERROR("Failed to create USBC PD FW control file!");
-
- return ret;
-}
-
-void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev)
-{
- sysfs_remove_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr);
- device_remove_file(adev->dev, &dev_attr_psp_vbflash_status);
-}
-
-static void psp_sysfs_fini(struct amdgpu_device *adev)
-{
- device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
-}
-
-const struct amdgpu_ip_block_version psp_v3_1_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 3,
.minor = 1,
@@ -3682,8 +3767,7 @@ const struct amdgpu_ip_block_version psp_v3_1_ip_block =
.funcs = &psp_ip_funcs,
};
-const struct amdgpu_ip_block_version psp_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 10,
.minor = 0,
@@ -3691,8 +3775,7 @@ const struct amdgpu_ip_block_version psp_v10_0_ip_block =
.funcs = &psp_ip_funcs,
};
-const struct amdgpu_ip_block_version psp_v11_0_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 11,
.minor = 0,
@@ -3708,8 +3791,7 @@ const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
.funcs = &psp_ip_funcs,
};
-const struct amdgpu_ip_block_version psp_v12_0_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 12,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index cf4f60c66122..3384eb94fde0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,8 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+extern const struct attribute_group amdgpu_flash_attr_group;
+
enum psp_shared_mem_size {
PSP_ASD_SHARED_MEM_SIZE = 0x0,
PSP_XGMI_SHARED_MEM_SIZE = 0x4000,
@@ -78,8 +80,7 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
};
-enum psp_ring_type
-{
+enum psp_ring_type {
PSP_RING_TYPE__INVALID = 0,
/*
* These values map to the way the PSP kernel identifies the
@@ -89,8 +90,7 @@ enum psp_ring_type
PSP_RING_TYPE__KM = 2 /* Kernel mode ring (formerly called GPCOM) */
};
-struct psp_ring
-{
+struct psp_ring {
enum psp_ring_type ring_type;
struct psp_gfx_rb_frame *ring_mem;
uint64_t ring_mem_mc_addr;
@@ -107,8 +107,7 @@ enum psp_reg_prog_id {
PSP_REG_LAST
};
-struct psp_funcs
-{
+struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_kdb)(struct psp_context *psp);
int (*bootloader_load_spl)(struct psp_context *psp);
@@ -133,6 +132,7 @@ struct psp_funcs
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
+ int (*fatal_error_recovery_quirk)(struct psp_context *psp);
};
struct ta_funcs {
@@ -307,10 +307,9 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
-struct psp_context
-{
- struct amdgpu_device *adev;
- struct psp_ring km_ring;
+struct psp_context {
+ struct amdgpu_device *adev;
+ struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
const struct psp_funcs *funcs;
@@ -339,7 +338,7 @@ struct psp_context
uint64_t tmr_mc_addr;
/* asd firmware */
- const struct firmware *asd_fw;
+ const struct firmware *asd_fw;
/* toc firmware */
const struct firmware *toc_fw;
@@ -384,9 +383,13 @@ struct psp_context
uint32_t boot_cfg_bitmask;
- char *vbflash_tmp_buf;
- size_t vbflash_image_size;
- bool vbflash_done;
+ /* firmware upgrades supported */
+ bool sup_pd_fw_up;
+ bool sup_ifwi_up;
+
+ char *vbflash_tmp_buf;
+ size_t vbflash_image_size;
+ bool vbflash_done;
};
struct amdgpu_psp_funcs {
@@ -443,6 +446,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
+#define psp_fatal_error_recovery_quirk(psp) \
+ ((psp)->funcs->fatal_error_recovery_quirk ? \
+ (psp)->funcs->fatal_error_recovery_quirk((psp)) : 0)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -455,10 +462,13 @@ extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, bool check_changed);
+extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
+
+int psp_execute_ip_fw_load(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode);
int psp_gpu_reset(struct amdgpu_device *adev);
-int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
- uint64_t cmd_gpu_addr, int cmd_size);
int psp_ta_init_shared_buf(struct psp_context *psp,
struct ta_mem_context *mem_ctx);
@@ -486,7 +496,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
int psp_ras_trigger_error(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info);
+ struct ta_ras_trigger_error_input *info, uint32_t instance_mask);
int psp_ras_terminate(struct psp_context *psp);
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -519,8 +529,8 @@ int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
+int psp_spatial_partition(struct psp_context *psp, int mode);
+
int is_psp_fw_valid(struct psp_bin_desc bin);
-int amdgpu_psp_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 12010c988c8b..123bcf5c2bb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -116,7 +116,6 @@ static const struct file_operations amdgpu_rap_debugfs_ops = {
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
{
-#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
if (!adev->psp.rap_context.context.initialized)
@@ -124,5 +123,4 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
adev, &amdgpu_rap_debugfs_ops);
-#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3ab8a88789c8..7689395e44fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -35,6 +35,7 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
+#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
@@ -171,8 +172,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
err_data.err_addr = &err_rec;
- amdgpu_umc_fill_error_record(&err_data, address,
- (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
+ amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -256,6 +256,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
int block_id;
uint32_t sub_block;
u64 address, value;
+ /* default value is 0 if the mask is not set by user */
+ u32 instance_mask = 0;
if (*pos)
return -EINVAL;
@@ -306,7 +308,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->op = op;
if (op == 2) {
- if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+ if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
+ &sub_block, &address, &value, &instance_mask) != 4 &&
+ sscanf(str, "%*s %*s %*s %u %llu %llu %u",
+ &sub_block, &address, &value, &instance_mask) != 4 &&
+ sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
&sub_block, &address, &value) != 3 &&
sscanf(str, "%*s %*s %*s %u %llu %llu",
&sub_block, &address, &value) != 3)
@@ -314,6 +320,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->head.sub_block_index = sub_block;
data->inject.address = address;
data->inject.value = value;
+ data->inject.instance_mask = instance_mask;
}
} else {
if (size < sizeof(*data))
@@ -326,6 +333,46 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
+static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
+ struct ras_debug_if *data)
+{
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+ uint32_t mask, inst_mask = data->inject.instance_mask;
+
+ /* no need to set instance mask if there is only one instance */
+ if (num_xcc <= 1 && inst_mask) {
+ data->inject.instance_mask = 0;
+ dev_dbg(adev->dev,
+ "RAS inject mask (0x%x) isn't supported, forcing it to 0.\n",
+ inst_mask);
+
+ return;
+ }
+
+ switch (data->head.block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ mask = GENMASK(num_xcc - 1, 0);
+ break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ mask = GENMASK(adev->sdma.num_instances - 1, 0);
+ break;
+ case AMDGPU_RAS_BLOCK__VCN:
+ case AMDGPU_RAS_BLOCK__JPEG:
+ mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
+ break;
+ default:
+ mask = inst_mask;
+ break;
+ }
+
+ /* remove invalid bits in instance mask */
+ data->inject.instance_mask &= mask;
+ if (inst_mask != data->inject.instance_mask)
+ dev_dbg(adev->dev,
+ "Adjust RAS inject mask 0x%x to 0x%x\n",
+ inst_mask, data->inject.instance_mask);
+}
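As a worked example of the clamping above (the debugfs path and a four-XCC GFX configuration are assumptions, not taken from this patch): with four XCCs the valid mask is GENMASK(3, 0) = 0xf, so an over-wide user mask is trimmed before injection:

    # Request injection on instances 0, 1 and 4 (mask 0x13); bit 4 is
    # outside GENMASK(3, 0), so the effective mask becomes 0x3.
    echo inject gfx ue 0x0 0x0 0x0 0x13 > /sys/kernel/debug/dri/0/ras/ras_ctrl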
+
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -341,7 +388,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
* name: the name of IP.
*
- * inject has two more members than head, they are address, value.
+ * inject has three more members than head: address, value and mask.
* As their names indicate, inject operation will write the
* value to the address.
*
@@ -365,7 +412,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
*
* echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
* echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
- * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
*
* Where N is the card you want to affect.
*
@@ -382,13 +429,14 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
*
* The sub-block is the sub-block index; pass 0 if there is no sub-block.
* The address and value are hexadecimal numbers, leading 0x is optional.
+ * The mask is the instance mask; it is optional and defaults to 0x1.
*
* For instance,
*
* .. code-block:: bash
*
* echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
- * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
*
* How to check the result of the operation?
@@ -442,7 +490,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size) ||
+ if ((data.inject.address >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
(data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
dev_warn(adev->dev, "RAS WARN: input address "
"0x%llx is invalid.",
@@ -460,6 +509,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
break;
}
+ amdgpu_ras_instance_mask_check(adev, &data);
+
/* data.inject.address is offset instead of absolute gpu address */
ret = amdgpu_ras_error_inject(adev, &data.inject);
break;
@@ -707,16 +758,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
- struct ras_common_if *head)
-{
- if (amdgpu_ras_is_feature_allowed(adev, head) ||
- amdgpu_ras_is_poison_mode_supported(adev))
- return 1;
- else
- return 0;
-}
-
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
@@ -728,7 +769,16 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (!con)
return -EINVAL;
- if (head->block == AMDGPU_RAS_BLOCK__GFX) {
+ /* Do not enable ras feature if it is not allowed */
+ if (enable &&
+ head->block != AMDGPU_RAS_BLOCK__GFX &&
+ !amdgpu_ras_is_feature_allowed(adev, head))
+ goto out;
+
+ /* Only enable gfx ras feature from host side */
+ if (head->block == AMDGPU_RAS_BLOCK__GFX &&
+ !amdgpu_sriov_vf(adev) &&
+ !amdgpu_ras_intr_triggered()) {
info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -744,16 +794,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
.error_type = amdgpu_ras_error_to_ta(head->type),
};
}
- }
-
- /* Do not enable if it is not allowed. */
- if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
- goto out;
- /* Only enable ras feature operation handle on host side */
- if (head->block == AMDGPU_RAS_BLOCK__GFX &&
- !amdgpu_sriov_vf(adev) &&
- !amdgpu_ras_intr_triggered()) {
ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
@@ -1109,21 +1150,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
}
/* Calculate XGMI relative offset */
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ info->head.block != AMDGPU_RAS_BLOCK__GFX) {
block_info.address =
amdgpu_xgmi_get_relative_phy_addr(adev,
block_info.address);
}
- if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
- if (block_obj->hw_ops->ras_error_inject)
- ret = block_obj->hw_ops->ras_error_inject(adev, info);
+ if (block_obj->hw_ops->ras_error_inject) {
+ if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
+ ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
+ else /* Special ras_error_inject is defined (e.g: xgmi) */
+ ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
+ info->instance_mask);
} else {
- /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */
- if (block_obj->hw_ops->ras_error_inject)
- ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
- else /*If not defined .ras_error_inject, use default ras_error_inject*/
- ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ /* default path */
+ ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
}
if (ret)
@@ -1441,6 +1483,7 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *dir;
@@ -1451,6 +1494,7 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
&amdgpu_ras_debugfs_eeprom_ops);
debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
&con->bad_page_cnt_threshold);
+ debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
@@ -1597,8 +1641,7 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
/* Fatal error events are handled on host side */
- if (amdgpu_sriov_vf(adev) ||
- !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
+ if (amdgpu_sriov_vf(adev))
return;
if (adev->nbio.ras &&
@@ -1636,8 +1679,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
}
}
- if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_poison_handler(adev, false);
+ amdgpu_umc_poison_handler(adev, false);
if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -2008,9 +2050,25 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
/* Perform full reset in fatal error mode */
if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- else
+ else {
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
+ ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ reset_context.method = AMD_RESET_METHOD_MODE2;
+ }
+
+ /* If a fatal error occurs in poison mode, mode1 reset is used to
+ * recover the gpu.
+ */
+ if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
+ ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ psp_fatal_error_recovery_quirk(&adev->psp);
+ }
+ }
+
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
atomic_set(&ras->in_recovery, 0);
@@ -2259,7 +2317,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
atomic_set(&con->in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
- max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
+ max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
/* Todo: During test the SMU might fail to read the eeprom through I2C
@@ -2350,6 +2408,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_IP_DISCOVERY) {
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
default:
@@ -2376,10 +2435,10 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
if (!ctx)
return;
- if (strnstr(ctx->vbios_version, "D16406",
- sizeof(ctx->vbios_version)) ||
- strnstr(ctx->vbios_version, "D36002",
- sizeof(ctx->vbios_version)))
+ if (strnstr(ctx->vbios_pn, "D16406",
+ sizeof(ctx->vbios_pn)) ||
+ strnstr(ctx->vbios_pn, "D36002",
+ sizeof(ctx->vbios_pn)))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}
@@ -2396,11 +2455,10 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
adev->ras_hw_enabled = adev->ras_enabled = 0;
- if (!adev->is_atom_fw ||
- !amdgpu_ras_asic_supported(adev))
+ if (!amdgpu_ras_asic_supported(adev))
return;
- if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
dev_info(adev->dev, "MEM ECC is active.\n");
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
@@ -2452,8 +2510,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
/* hw_supported needs to be aligned with RAS block mask. */
adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
- adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
- adev->ras_hw_enabled & amdgpu_ras_mask;
+
+ /*
+ * Disable ras feature for aqua vanjaram
+ * by default on apu platform.
+ */
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) &&
+ adev->gmc.is_app_apu)
+ adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 :
+ adev->ras_hw_enabled & amdgpu_ras_mask;
+ else
+ adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
+ adev->ras_hw_enabled & amdgpu_ras_mask;
}
static void amdgpu_ras_counte_dw(struct work_struct *work)
@@ -2579,6 +2647,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
* check DF RAS */
adev->nbio.ras = &nbio_v4_3_ras;
break;
+ case IP_VERSION(7, 9, 0):
+ if (!adev->gmc.is_app_apu)
+ adev->nbio.ras = &nbio_v7_9_ras;
+ break;
default:
/* nbio ras is not available */
break;
@@ -2625,7 +2697,8 @@ release_con:
int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu)
return 1;
return 0;
}
@@ -2701,23 +2774,28 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
goto cleanup;
}
- r = amdgpu_ras_sysfs_create(adev, ras_block);
- if (r)
- goto interrupt;
+ if (ras_obj->hw_ops &&
+ (ras_obj->hw_ops->query_ras_error_count ||
+ ras_obj->hw_ops->query_ras_error_status)) {
+ r = amdgpu_ras_sysfs_create(adev, ras_block);
+ if (r)
+ goto interrupt;
- /* Those are the cached values at init.
- */
- query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
- if (!query_info)
- return -ENOMEM;
- memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
+ /* Those are the cached values at init.
+ */
+ query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
+ if (!query_info)
+ return -ENOMEM;
+ memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
- atomic_set(&con->ras_ce_count, ce_count);
- atomic_set(&con->ras_ue_count, ue_count);
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
+
+ kfree(query_info);
}
- kfree(query_info);
return 0;
interrupt:
@@ -2894,14 +2972,13 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
- amdgpu_ras_check_supported(adev);
- if (!adev->ras_hw_enabled)
- return;
-
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
dev_info(adev->dev, "uncorrectable hardware error"
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
}
@@ -3069,6 +3146,10 @@ int amdgpu_ras_is_supported(struct amdgpu_device *adev,
* that the ras block supports ras function.
*/
if (!ret &&
+ (block == AMDGPU_RAS_BLOCK__GFX ||
+ block == AMDGPU_RAS_BLOCK__SDMA ||
+ block == AMDGPU_RAS_BLOCK__VCN ||
+ block == AMDGPU_RAS_BLOCK__JPEG) &&
amdgpu_ras_is_poison_mode_supported(adev) &&
amdgpu_ras_get_ras_block(adev, block, 0))
ret = 1;
@@ -3104,3 +3185,143 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
return 0;
}
+
+void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
+{
+ if (!err_type_name)
+ return;
+
+ switch (err_type) {
+ case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
+ sprintf(err_type_name, "correctable");
+ break;
+ case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
+ sprintf(err_type_name, "uncorrectable");
+ break;
+ default:
+ sprintf(err_type_name, "unknown");
+ break;
+ }
+}
+
+bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ uint32_t *memory_id)
+{
+ uint32_t err_status_lo_data, err_status_lo_offset;
+
+ if (!reg_entry)
+ return false;
+
+ err_status_lo_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
+ reg_entry->seg_lo, reg_entry->reg_lo);
+ err_status_lo_data = RREG32(err_status_lo_offset);
+
+ if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
+ !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
+ return false;
+
+ *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
+
+ return true;
+}
+
+bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ unsigned long *err_cnt)
+{
+ uint32_t err_status_hi_data, err_status_hi_offset;
+
+ if (!reg_entry)
+ return false;
+
+ err_status_hi_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
+ reg_entry->seg_hi, reg_entry->reg_hi);
+ err_status_hi_data = RREG32(err_status_hi_offset);
+
+ if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
+ !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
+ /* keep the check here in case we need to refer to the result later */
+ dev_dbg(adev->dev, "Invalid err_info field\n");
+
+ /* read err count */
+ *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
+
+ return true;
+}
+
+void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ const struct amdgpu_ras_memory_id_entry *mem_list,
+ uint32_t mem_list_size,
+ uint32_t instance,
+ uint32_t err_type,
+ unsigned long *err_count)
+{
+ uint32_t memory_id;
+ unsigned long err_cnt;
+ char err_type_name[16];
+ uint32_t i, j;
+
+ for (i = 0; i < reg_list_size; i++) {
+ /* query memory_id from err_status_lo */
+ if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
+ instance, &memory_id))
+ continue;
+
+ /* query err_cnt from err_status_hi */
+ if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
+ instance, &err_cnt) ||
+ !err_cnt)
+ continue;
+
+ *err_count += err_cnt;
+
+ /* log the errors */
+ amdgpu_ras_get_error_type_name(err_type, err_type_name);
+ if (!mem_list) {
+ /* memory_list is not supported */
+ dev_info(adev->dev,
+ "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
+ err_cnt, err_type_name,
+ reg_list[i].block_name,
+ instance, memory_id);
+ } else {
+ for (j = 0; j < mem_list_size; j++) {
+ if (memory_id == mem_list[j].memory_id) {
+ dev_info(adev->dev,
+ "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
+ err_cnt, err_type_name,
+ reg_list[i].block_name,
+ instance, mem_list[j].name);
+ break;
+ }
+ }
+ }
+ }
+}
+
+void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ uint32_t instance)
+{
+ uint32_t err_status_lo_offset, err_status_hi_offset;
+ uint32_t i;
+
+ for (i = 0; i < reg_list_size; i++) {
+ err_status_lo_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
+ reg_list[i].seg_lo, reg_list[i].reg_lo);
+ err_status_hi_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
+ reg_list[i].seg_hi, reg_list[i].reg_hi);
+ WREG32(err_status_lo_offset, 0);
+ WREG32(err_status_hi_offset, 0);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 17b3d1992e80..ffb49b2d533a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -32,6 +32,11 @@
struct amdgpu_iv_entry;
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
+/* Position of the instance value in sub_block_index of
+ * ta_ras_trigger_error_input; the sub block uses the lower 12 bits.
+ */
+#define AMDGPU_RAS_INST_MASK 0xfffff000
+#define AMDGPU_RAS_INST_SHIFT 0xc
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -314,6 +319,46 @@ enum amdgpu_ras_ret {
AMDGPU_RAS_PT,
};
+/* ras error status register fields */
+#define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+#define ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define ERR_STATUS__ERR_CNT__SHIFT 0x17
+#define ERR_STATUS__ERR_CNT_MASK 0x03800000L
+
+#define AMDGPU_RAS_REG_ENTRY(ip, inst, reg_lo, reg_hi) \
+ ip##_HWIP, inst, reg_lo##_BASE_IDX, reg_lo, reg_hi##_BASE_IDX, reg_hi
+
+#define AMDGPU_RAS_REG_ENTRY_OFFSET(hwip, ip_inst, segment, reg) \
+ (adev->reg_offset[hwip][ip_inst][segment] + (reg))
+
+#define AMDGPU_RAS_ERR_INFO_VALID (1 << 0)
+#define AMDGPU_RAS_ERR_STATUS_VALID (1 << 1)
+#define AMDGPU_RAS_ERR_ADDRESS_VALID (1 << 2)
+
+#define AMDGPU_RAS_GPU_RESET_MODE2_RESET (0x1 << 0)
+#define AMDGPU_RAS_GPU_RESET_MODE1_RESET (0x1 << 1)
+
+struct amdgpu_ras_err_status_reg_entry {
+ uint32_t hwip;
+ uint32_t ip_inst;
+ uint32_t seg_lo;
+ uint32_t reg_lo;
+ uint32_t seg_hi;
+ uint32_t reg_hi;
+ uint32_t reg_inst;
+ uint32_t flags;
+ const char *block_name;
+};
+
+struct amdgpu_ras_memory_id_entry {
+ uint32_t memory_id;
+ const char *name;
+};
+
struct ras_common_if {
enum amdgpu_ras_block block;
enum amdgpu_ras_error_type type;
@@ -385,6 +430,9 @@ struct amdgpu_ras {
/* Indicates smu whether need update bad channel info */
bool update_channel_flag;
+
+ /* Record special requirements of gpu reset caller */
+ uint32_t gpu_reset_flags;
};
struct ras_fs_data {
@@ -471,6 +519,7 @@ struct ras_inject_if {
struct ras_common_if head;
uint64_t address;
uint64_t value;
+ uint32_t instance_mask;
};
struct ras_cure_if {
@@ -508,7 +557,8 @@ struct amdgpu_ras_block_object {
};
struct amdgpu_ras_block_hw_ops {
- int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
+ int (*ras_error_inject)(struct amdgpu_device *adev,
+ void *inject_if, uint32_t instance_mask);
void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status);
void (*query_ras_error_status)(struct amdgpu_device *adev);
void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status);
@@ -696,4 +746,25 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
struct amdgpu_ras_block_object *ras_block_obj);
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev);
+void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name);
+bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ uint32_t *memory_id);
+bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ unsigned long *err_cnt);
+void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ const struct amdgpu_ras_memory_id_entry *mem_list,
+ uint32_t mem_list_size,
+ uint32_t instance,
+ uint32_t err_type,
+ unsigned long *err_count);
+void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ uint32_t instance);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c2c2a7718613..4764d2171f92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -68,11 +68,24 @@
/* Table hdr is 'AMDR' */
#define RAS_TABLE_HDR_VAL 0x414d4452
-#define RAS_TABLE_VER 0x00010000
/* Bad GPU tag ‘BADG’ */
#define RAS_TABLE_HDR_BAD 0x42414447
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
/* Assume 2-Mbit size EEPROM and take up the whole space. */
#define RAS_TBL_SIZE_BYTES (256 * 1024)
#define RAS_TABLE_START 0
@@ -81,6 +94,35 @@
#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
/ RAS_TABLE_RECORD_SIZE)
+/*
+ * EEPROM Table structrue v2.1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE RAS INFO |
+ * | (available info size 4 Bytes) |
+ * | ( reserved size 252 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+ RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
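+/* Rough sense of scale, assuming the 24-byte RAS_TABLE_RECORD_SIZE defined
+ * earlier in this file: with the 2-Mbit (256 KiB) EEPROM, the 20-byte header
+ * and the 256-byte ras info block, v2.1 leaves room for roughly
+ * (262144 - 20 - 256) / 24, i.e. about 10900 bad page records.
+ */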
+
/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
* offset off of RAS_TABLE_START. That is, this is something you can
* add to control->i2c_address, and then tell I2C layer to read
@@ -103,6 +145,10 @@
#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
+#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE)
+
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
@@ -112,6 +158,7 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7): /* Sienna cichlid */
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 2): /* Aldebaran */
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
default:
@@ -148,9 +195,9 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
/* VEGA20 and ARCTURUS */
if (adev->asic_type == CHIP_VEGA20)
control->i2c_address = EEPROM_I2C_MADDR_0;
- else if (strnstr(atom_ctx->vbios_version,
+ else if (strnstr(atom_ctx->vbios_pn,
"D342",
- sizeof(atom_ctx->vbios_version)))
+ sizeof(atom_ctx->vbios_pn)))
control->i2c_address = EEPROM_I2C_MADDR_0;
else
control->i2c_address = EEPROM_I2C_MADDR_4;
@@ -159,13 +206,14 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
control->i2c_address = EEPROM_I2C_MADDR_0;
return true;
case IP_VERSION(13, 0, 2):
- if (strnstr(atom_ctx->vbios_version, "D673",
- sizeof(atom_ctx->vbios_version)))
+ if (strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
control->i2c_address = EEPROM_I2C_MADDR_4;
else
control->i2c_address = EEPROM_I2C_MADDR_0;
return true;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
@@ -230,6 +278,69 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
return res;
}
+static void
+__encode_table_ras_info_to_buf(struct amdgpu_ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = ((uint32_t)(rai->rma_status) & 0xFF) |
+ (((uint32_t)(rai->health_percent) << 8) & 0xFF00) |
+ (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000);
+ pp[0] = cpu_to_le32(tmp);
+}
+
+static void
+__decode_table_ras_info_from_buf(struct amdgpu_ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = le32_to_cpu(pp[0]);
+ rai->rma_status = tmp & 0xFF;
+ rai->health_percent = (tmp >> 8) & 0xFF;
+ rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF;
+}
+
+static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ u8 *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ return -ENOMEM;
+ }
+
+ __encode_table_ras_info_to_buf(&control->tbl_rai, buf);
+
+ /* i2c may be unstable in gpu reset */
+ down_read(&adev->reset_domain->sem);
+ res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ up_read(&adev->reset_domain->sem);
+
+ if (res < 0) {
+ DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ } else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ DRM_ERROR("Short write:%d out of %d\n",
+ res, RAS_TABLE_V2_1_INFO_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ kfree(buf);
+
+ return res;
+}
+
static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control)
{
int ii;
@@ -246,6 +357,21 @@ static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control)
return csum;
}
+static u8 __calc_ras_info_byte_sum(const struct amdgpu_ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ size_t sz;
+
+ sz = sizeof(control->tbl_rai);
+ pp = (u8 *) &control->tbl_rai;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
static int amdgpu_ras_eeprom_correct_header_tag(
struct amdgpu_ras_eeprom_control *control,
uint32_t header)
@@ -282,6 +408,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
u8 csum;
int res;
@@ -289,14 +416,37 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
mutex_lock(&control->ras_tbl_mutex);
hdr->header = RAS_TABLE_HDR_VAL;
- hdr->version = RAS_TABLE_VER;
- hdr->first_rec_offset = RAS_RECORD_START;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ if (adev->umc.ras &&
+ adev->umc.ras->set_eeprom_table_version)
+ adev->umc.ras->set_eeprom_table_version(hdr);
+ else
+ hdr->version = RAS_TABLE_VER_V1;
+
+ if (hdr->version == RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = GPU_HEALTH_USABLE;
+ /**
+ * GPU health represented as a percentage.
+ * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ }
csum = __calc_hdr_byte_sum(control);
+ if (hdr->version == RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
csum = -csum;
hdr->checksum = csum;
res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
control->ras_num_recs = 0;
control->ras_fri = 0;
@@ -573,11 +723,19 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_recs, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
}
- control->tbl_hdr.version = RAS_TABLE_VER;
- control->tbl_hdr.first_rec_offset = RAS_INDEX_TO_OFFSET(control, control->ras_fri);
- control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
control->tbl_hdr.checksum = 0;
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
@@ -606,6 +764,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
goto Out;
}
+ /**
+ * bad page records have been stored in eeprom,
+ * now calculate gpu health percent
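+ * (illustrative example: a bad page threshold of 100 with 25 retired
+ * pages yields a health_percent of 75)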
+ */
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
+ control->ras_num_recs < ras->bad_page_cnt_threshold)
+ control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
+ control->ras_num_recs) * 100) /
+ ras->bad_page_cnt_threshold;
+
/* Recalc the checksum.
*/
csum = 0;
@@ -613,10 +782,14 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
csum += *pp;
csum += __calc_hdr_byte_sum(control);
+ if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
/* avoid sign extension when assigning to "checksum" */
csum = -csum;
control->tbl_hdr.checksum = csum;
res = __write_table_header(control);
+ if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
Out:
kfree(buf);
return res;
@@ -807,9 +980,12 @@ Out:
return res;
}
-uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control)
{
- return RAS_MAX_RECORD_COUNT;
+ if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ return RAS_MAX_RECORD_COUNT_V2_1;
+ else
+ return RAS_MAX_RECORD_COUNT;
}
static ssize_t
@@ -1051,8 +1227,14 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
int buf_size, res;
u8 csum, *buf, *pp;
- buf_size = RAS_TABLE_HEADER_SIZE +
- control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1)
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
DRM_ERROR("Out of memory checking RAS table checksum.\n");
@@ -1080,6 +1262,39 @@ Out:
return res < 0 ? res : csum;
}
+static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ unsigned char *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ return -ENOMEM;
+ }
+
+ /**
+ * EEPROM table V2_1 supports ras info,
+ * read EEPROM table ras info
+ */
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address + control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ res = res >= 0 ? -EIO : res;
+ goto Out;
+ }
+
+ __decode_table_ras_info_from_buf(rai, buf);
+
+Out:
+ kfree(buf);
+ return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
+}
+
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit)
{
@@ -1102,8 +1317,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
return -EINVAL;
control->ras_header_offset = RAS_HDR_START;
- control->ras_record_offset = RAS_RECORD_START;
- control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ control->ras_info_offset = RAS_TABLE_V2_1_INFO_START;
mutex_init(&control->ras_tbl_mutex);
/* Read the table header from EEPROM address */
@@ -1117,12 +1331,27 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
__decode_table_header_from_buf(hdr, buf);
- control->ras_num_recs = RAS_NUM_RECS(hdr);
+ if (hdr->version == RAS_TABLE_VER_V2_1) {
+ control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ } else {
+ control->ras_num_recs = RAS_NUM_RECS(hdr);
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ }
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
if (hdr->header == RAS_TABLE_HDR_VAL) {
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->ras_num_recs);
+
+ if (hdr->version == RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
res = __verify_ras_table_checksum(control);
if (res)
DRM_ERROR("RAS table incorrect checksum or error:%d\n",
@@ -1136,6 +1365,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
+ if (hdr->version == RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
res = __verify_ras_table_checksum(control);
if (res)
DRM_ERROR("RAS Table incorrect checksum or error:%d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 54d9bfe0881d..6dfd667f3013 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -26,8 +26,16 @@
#include <linux/i2c.h>
+#define RAS_TABLE_VER_V1 0x00010000
+#define RAS_TABLE_VER_V2_1 0x00021000
+
struct amdgpu_device;
+enum amdgpu_ras_gpu_health_status {
+ GPU_HEALTH_USABLE = 0,
+ GPU_RETIRED__ECC_REACH_THRESHOLD = 2,
+};
+
enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_NA,
AMDGPU_RAS_EEPROM_ERR_RECOVERABLE,
@@ -43,9 +51,18 @@ struct amdgpu_ras_eeprom_table_header {
uint32_t checksum;
} __packed;
+struct amdgpu_ras_eeprom_table_ras_info {
+ u8 rma_status;
+ u8 health_percent;
+ u16 ecc_page_threshold;
+ u32 padding[64 - 1];
+} __packed;
+
struct amdgpu_ras_eeprom_control {
struct amdgpu_ras_eeprom_table_header tbl_hdr;
+ struct amdgpu_ras_eeprom_table_ras_info tbl_rai;
+
/* Base I2C EEPROM 19-bit memory address,
* where the table is located. For more information,
* see top of amdgpu_eeprom.c.
@@ -58,6 +75,7 @@ struct amdgpu_ras_eeprom_control {
* right after the header.
*/
u32 ras_header_offset;
+ u32 ras_info_offset;
u32 ras_record_offset;
/* Number of records in the table.
@@ -124,7 +142,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, const u32 num);
-uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..3c988cc406e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -90,6 +90,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
cur->node = block;
break;
case TTM_PL_TT:
+ case AMDGPU_PL_DOORBELL:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -152,6 +153,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
break;
case TTM_PL_TT:
+ case AMDGPU_PL_DOORBELL:
node = cur->node;
cur->node = ++node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 6437ead87e5f..5fed06ffcc6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -40,6 +40,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
ret = aldebaran_reset_init(adev);
break;
case IP_VERSION(11, 0, 7):
@@ -61,6 +62,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
ret = aldebaran_reset_fini(adev);
break;
case IP_VERSION(11, 0, 7):
@@ -85,7 +87,7 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
if (!reset_handler)
- return -ENOSYS;
+ return -EOPNOTSUPP;
return reset_handler->prepare_hwcontext(adev->reset_cntl,
reset_context);
@@ -101,7 +103,7 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
if (!reset_handler)
- return -ENOSYS;
+ return -EOPNOTSUPP;
ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index dc474b809604..80d6e132e409 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -50,6 +50,26 @@
*/
/**
+ * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
+ *
+ * @type: ring type for which to return the limit.
+ */
+unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
+{
+ switch (type) {
+ case AMDGPU_RING_TYPE_GFX:
+ /* Need to keep at least 192 on GFX7+ for old radv. */
+ return 192;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ return 125;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ return 16;
+ default:
+ return 49;
+ }
+}
+
+/**
* amdgpu_ring_alloc - allocate space on the ring buffer
*
* @ring: amdgpu_ring structure holding ring information
@@ -58,7 +78,7 @@
* Allocate @ndw dwords in the ring buffer (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
@@ -182,6 +202,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
int sched_hw_submission = amdgpu_sched_hw_submission;
u32 *num_sched;
u32 hw_ip;
+ unsigned int max_ibs_dw;
/* Set the hw submission limit higher for KIQ because
* it's used for a number of gfx/compute tasks by both
@@ -290,6 +311,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
+ max_ibs_dw = ring->funcs->emit_frame_size +
+ amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
+ max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ if (WARN_ON(max_ibs_dw > max_dw))
+ max_dw = max_ibs_dw;
+
ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
ring->buf_mask = (ring->ring_size / 4) - 1;
@@ -361,6 +389,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
+ } else {
+ kfree(ring->fence_drv.fences);
}
dma_fence_put(ring->vmid_wait);
@@ -403,11 +433,18 @@ void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
struct dma_fence *fence)
{
+ unsigned long flags;
+
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
+ spin_lock_irqsave(fence->lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ dma_fence_set_error(fence, -ENODATA);
+ spin_unlock_irqrestore(fence->lock, flags);
+
atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
@@ -478,6 +515,70 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
.llseek = default_llseek
};
+static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ volatile u32 *mqd;
+ int r;
+ uint32_t value, result;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ result = 0;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+ if (r) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ return r;
+ }
+
+ while (size) {
+ if (*pos >= ring->mqd_size)
+ goto done;
+
+ value = mqd[*pos/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto done;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+
+done:
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ mqd = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r)
+ return r;
+
+ return result;
+}
+
+static const struct file_operations amdgpu_debugfs_mqd_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_mqd_read,
+ .llseek = default_llseek
+};
+
+static int amdgpu_debugfs_ring_error(void *data, u64 val)
+{
+ struct amdgpu_ring *ring = data;
+
+ amdgpu_fence_driver_set_error(ring, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
+ amdgpu_debugfs_ring_error, "%lld\n");
+
#endif
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
@@ -489,10 +590,21 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
- debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
&amdgpu_debugfs_ring_fops,
ring->ring_size + 12);
+ if (ring->mqd_obj) {
+ sprintf(name, "amdgpu_mqd_%s", ring->name);
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_mqd_fops,
+ ring->mqd_size);
+ }
+
+ sprintf(name, "amdgpu_error_%s", ring->name);
+ debugfs_create_file(name, 0200, root, ring,
+ &amdgpu_debugfs_error_fops);
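A short user-space sketch of the two new per-ring debugfs files created above; the DRM minor index and ring name are illustrative assumptions:

    # Dump the start of the ring's memory queue descriptor (amdgpu_mqd_<ring>).
    xxd /sys/kernel/debug/dri/0/amdgpu_mqd_gfx_0.0.0 | head
    # Inject a fence error on the ring (amdgpu_error_<ring>); the written value
    # is parsed as a signed integer and passed to amdgpu_fence_driver_set_error().
    echo -5 > /sys/kernel/debug/dri/0/amdgpu_error_gfx_0.0.0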
+
#endif
}
@@ -581,3 +693,21 @@ void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_end(ring);
}
+
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
+}
+
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
+}
+
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
+}
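
The ring sizing change above bumps max_dw whenever the worst-case IB footprint would not fit. A standalone sketch of that arithmetic follows; the values for max_dw, max_ibs, emit_ib_size, align_mask and sched_hw_submission are made up for illustration, and only the IB term visible above is modelled (the term preceding it on the continuation line is omitted).

/* Illustrative userspace model of the ring sizing math above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two(uint32_t x)
{
	uint32_t r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	uint32_t max_dw = 1024;		/* hypothetical caller-requested size */
	uint32_t max_ibs = 16;		/* hypothetical amdgpu_ring_max_ibs() */
	uint32_t emit_ib_size = 7;	/* hypothetical funcs->emit_ib_size */
	uint32_t align_mask = 0xff;	/* hypothetical funcs->align_mask */
	uint32_t sched_hw_submission = 2;
	uint32_t max_ibs_dw, ring_size, buf_mask;

	max_ibs_dw = max_ibs * emit_ib_size;
	max_ibs_dw = (max_ibs_dw + align_mask) & ~align_mask;

	if (max_ibs_dw > max_dw)
		max_dw = max_ibs_dw;	/* mirrors the WARN_ON() bump above */

	ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
	buf_mask = (ring_size / 4) - 1;

	printf("ring_size=%u bytes, buf_mask=0x%x\n", ring_size, buf_mask);
	return 0;
}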
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index d8749444b689..e2ab303ad270 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -37,8 +37,8 @@ struct amdgpu_job;
struct amdgpu_vm;
/* max number of rings */
-#define AMDGPU_MAX_RINGS 28
-#define AMDGPU_MAX_HWIP_RINGS 8
+#define AMDGPU_MAX_RINGS 124
+#define AMDGPU_MAX_HWIP_RINGS 64
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_SW_GFX_RINGS 2
#define AMDGPU_MAX_COMPUTE_RINGS 8
@@ -126,6 +126,7 @@ struct amdgpu_fence_driver {
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
@@ -212,6 +213,8 @@ struct amdgpu_ring_funcs {
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+ void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
+ u64 gds_va, bool init_shadow, int vmid);
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
@@ -227,6 +230,9 @@ struct amdgpu_ring_funcs {
int (*preempt_ib)(struct amdgpu_ring *ring);
void (*emit_mem_sync)(struct amdgpu_ring *ring);
void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
+ void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
+ void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
+ void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
};
struct amdgpu_ring {
@@ -250,12 +256,14 @@ struct amdgpu_ring {
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
+ u32 xcp_id;
u32 me;
u32 pipe;
u32 queue;
struct amdgpu_bo *mqd_obj;
uint64_t mqd_gpu_addr;
void *mqd_ptr;
+ unsigned mqd_size;
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
@@ -309,6 +317,7 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
+#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
@@ -318,10 +327,17 @@ struct amdgpu_ring {
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
+#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
+#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
+#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
+unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
@@ -373,7 +389,7 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
occupied = ring->wptr & ring->buf_mask;
dst = (void *)&ring->ring[occupied];
chunk1 = ring->buf_mask + 1 - occupied;
- chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
+ chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
chunk2 = count_dw - chunk1;
chunk1 <<= 2;
chunk2 <<= 2;
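
The hunk above touches the wrap-around write path; the chunk1/chunk2 split it performs is easiest to see in a minimal standalone model. The 8-dword ring and write position below are arbitrary example values.

/* Minimal sketch of the split done by amdgpu_ring_write_multiple(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t ring[8];			/* 8-dword ring => buf_mask = 7 */
	uint32_t buf_mask = 7;
	uint64_t wptr = 6;			/* pretend 6 dwords already written */
	uint32_t src[4] = { 0xa, 0xb, 0xc, 0xd };
	uint32_t count_dw = 4;
	uint32_t occupied, chunk1, chunk2;

	occupied = wptr & buf_mask;
	chunk1 = buf_mask + 1 - occupied;		/* dwords until the wrap */
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;			/* dwords after the wrap */

	memcpy(&ring[occupied], src, chunk1 * 4);
	memcpy(&ring[0], src + chunk1, chunk2 * 4);

	wptr += count_dw;
	printf("chunk1=%u chunk2=%u new wptr=%llu\n",
	       chunk1, chunk2, (unsigned long long)wptr);
	return 0;
}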
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 62079f0e3ee8..e1ee1c7117fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -105,6 +105,16 @@ static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
amdgpu_fence_update_start_timestamp(e->ring,
chunk->sync_seq,
ktime_get());
+ if (chunk->sync_seq ==
+ le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
+ if (chunk->cntl_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_cntl(e->ring,
+ chunk->cntl_offset);
+ if (chunk->ce_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
+ if (chunk->de_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_de(e->ring, chunk->de_offset);
+ }
amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
chunk->start,
chunk->end);
@@ -387,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
- if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
if (amdgpu_mcbp_scan(mux) > 0)
amdgpu_mcbp_trigger_preempt(mux);
return;
@@ -407,6 +417,20 @@ void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
amdgpu_ring_mux_end_ib(mux, ring);
}
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+ unsigned offset;
+
+ if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ return;
+
+ offset = ring->wptr & ring->buf_mask;
+
+ amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
+}
+
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
@@ -429,6 +453,10 @@ void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *r
}
chunk->start = ring->wptr;
+	/* initial out-of-range values, used to check whether the IB submission set them */
+ chunk->cntl_offset = ring->buf_mask + 1;
+ chunk->de_offset = ring->buf_mask + 1;
+ chunk->ce_offset = ring->buf_mask + 1;
list_add_tail(&chunk->entry, &e->list);
}
@@ -454,6 +482,41 @@ static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct a
}
}
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
+ struct amdgpu_ring *ring, u64 offset,
+ enum amdgpu_ring_mux_offset_type type)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry!\n");
+ return;
+ }
+
+ chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
+ if (!chunk) {
+ DRM_ERROR("cannot find chunk!\n");
+ return;
+ }
+
+ switch (type) {
+ case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
+ chunk->cntl_offset = offset;
+ break;
+ case AMDGPU_MUX_OFFSET_TYPE_DE:
+ chunk->de_offset = offset;
+ break;
+ case AMDGPU_MUX_OFFSET_TYPE_CE:
+ chunk->ce_offset = offset;
+ break;
+ default:
+ DRM_ERROR("invalid type (%d)\n", type);
+ break;
+ }
+}
+
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
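
amdgpu_ring_mux_start_ib() seeds the cntl/ce/de offsets to buf_mask + 1, which can never be a valid ring offset, so the resubmit path only patches offsets that were actually marked. A tiny standalone model of that sentinel check, with illustrative values:

/* Model of the "offset never marked" sentinel used by the ring mux. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t buf_mask = 0x3ff;		/* hypothetical 1024-dword ring */
	uint64_t cntl_offset = buf_mask + 1;	/* sentinel: never marked */
	uint64_t ce_offset = 0x120;		/* pretend this one was marked */

	if (cntl_offset <= buf_mask)
		printf("would patch cntl at 0x%llx\n", (unsigned long long)cntl_offset);
	else
		printf("cntl offset never marked, skip patch\n");

	if (ce_offset <= buf_mask)
		printf("would patch ce at 0x%llx\n", (unsigned long long)ce_offset);
	return 0;
}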
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
index 4be45fc14954..d3186b570b82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
@@ -50,6 +50,21 @@ struct amdgpu_mux_entry {
struct list_head list;
};
+enum amdgpu_ring_mux_offset_type {
+ AMDGPU_MUX_OFFSET_TYPE_CONTROL,
+ AMDGPU_MUX_OFFSET_TYPE_DE,
+ AMDGPU_MUX_OFFSET_TYPE_CE,
+};
+
+enum ib_complete_status {
+ /* IB not started/reset value, default value. */
+ IB_COMPLETION_STATUS_DEFAULT = 0,
+ /* IB preempted, started but not completed. */
+ IB_COMPLETION_STATUS_PREEMPTED = 1,
+ /* IB completed. */
+ IB_COMPLETION_STATUS_COMPLETED = 2,
+};
+
struct amdgpu_ring_mux {
struct amdgpu_ring *real_ring;
@@ -72,12 +87,18 @@ struct amdgpu_ring_mux {
* @sync_seq: the fence seqno related with the saved IB.
* @start:- start location on the software ring.
* @end:- end location on the software ring.
+ * @cntl_offset:- the PRE_RESUME bit position used for resubmission.
+ * @de_offset:- the anchor in write_data for de meta of resubmission.
+ * @ce_offset:- the anchor in write_data for ce meta of resubmission.
*/
struct amdgpu_mux_chunk {
struct list_head entry;
uint32_t sync_seq;
u64 start;
u64 end;
+ u64 cntl_offset;
+ u64 de_offset;
+ u64 ce_offset;
};
int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
@@ -89,6 +110,8 @@ u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ri
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
+ u64 offset, enum amdgpu_ring_mux_offset_type type);
bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux);
u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring);
@@ -97,6 +120,7 @@ void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring);
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type);
const char *amdgpu_sw_ring_name(int idx);
unsigned int amdgpu_sw_ring_priority(int idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 85fb730d9fc8..35e0ae9acadc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -31,12 +31,13 @@
* amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
*
* @adev: amdgpu_device pointer
+ * @xcc_id: xcc accelerated compute core id
*
* Set RLC enter into safe mode if RLC is enabled and haven't in safe mode.
*/
-void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
- if (adev->gfx.rlc.in_safe_mode)
+ if (adev->gfx.rlc.in_safe_mode[xcc_id])
return;
/* if RLC is not enabled, do nothing */
@@ -46,8 +47,8 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
if (adev->cg_flags &
(AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- adev->gfx.rlc.funcs->set_safe_mode(adev);
- adev->gfx.rlc.in_safe_mode = true;
+ adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id);
+ adev->gfx.rlc.in_safe_mode[xcc_id] = true;
}
}
@@ -55,12 +56,13 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
* amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
*
* @adev: amdgpu_device pointer
+ * @xcc_id: xcc accelerated compute core id
*
* Set RLC exit safe mode if RLC is enabled and have entered into safe mode.
*/
-void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
- if (!(adev->gfx.rlc.in_safe_mode))
+ if (!(adev->gfx.rlc.in_safe_mode[xcc_id]))
return;
/* if RLC is not enabled, do nothing */
@@ -70,8 +72,8 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
if (adev->cg_flags &
(AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- adev->gfx.rlc.funcs->unset_safe_mode(adev);
- adev->gfx.rlc.in_safe_mode = false;
+ adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id);
+ adev->gfx.rlc.in_safe_mode[xcc_id] = false;
}
}
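
With safe mode now tracked per XCC, entering it twice for the same instance stays a no-op while different instances remain independent. A standalone sketch of that bookkeeping; the set/unset hardware callbacks are stubbed out, and the instance count reuses the header's AMDGPU_MAX_RLC_INSTANCES.

/* Standalone model of the per-XCC in_safe_mode[] tracking above. */
#include <stdbool.h>
#include <stdio.h>

#define AMDGPU_MAX_RLC_INSTANCES 8

static bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES];

static void enter_safe_mode(int xcc_id)
{
	if (in_safe_mode[xcc_id])
		return;				/* already in safe mode, nothing to do */
	/* real driver: funcs->set_safe_mode(adev, xcc_id) */
	in_safe_mode[xcc_id] = true;
	printf("xcc %d entered safe mode\n", xcc_id);
}

static void exit_safe_mode(int xcc_id)
{
	if (!in_safe_mode[xcc_id])
		return;
	/* real driver: funcs->unset_safe_mode(adev, xcc_id) */
	in_safe_mode[xcc_id] = false;
	printf("xcc %d left safe mode\n", xcc_id);
}

int main(void)
{
	enter_safe_mode(0);
	enter_safe_mode(0);	/* no-op, instance 0 already in safe mode */
	enter_safe_mode(1);
	exit_safe_mode(0);
	exit_safe_mode(1);
	return 0;
}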
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 23f060db9255..b591d33af264 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -26,6 +26,8 @@
#include "clearstate_defs.h"
+#define AMDGPU_MAX_RLC_INSTANCES 8
+
/* firmware ID used in rlc toc */
typedef enum _FIRMWARE_ID_ {
FIRMWARE_ID_INVALID = 0,
@@ -157,8 +159,8 @@ typedef struct _RLC_TABLE_OF_CONTENT {
struct amdgpu_rlc_funcs {
bool (*is_rlc_enabled)(struct amdgpu_device *adev);
- void (*set_safe_mode)(struct amdgpu_device *adev);
- void (*unset_safe_mode)(struct amdgpu_device *adev);
+ void (*set_safe_mode)(struct amdgpu_device *adev, int xcc_id);
+ void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -201,7 +203,7 @@ struct amdgpu_rlc {
u32 cp_table_size;
/* safe mode for updating CG/PG state */
- bool in_safe_mode;
+ bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES];
const struct amdgpu_rlc_funcs *funcs;
/* for firmware data */
@@ -257,11 +259,11 @@ struct amdgpu_rlc {
bool rlcg_reg_access_supported;
/* registers for rlcg indirect reg access */
- struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl;
+ struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[AMDGPU_MAX_RLC_INSTANCES];
};
-void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
-void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 231ca06bc9c7..e2b9392d7f0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -64,7 +64,7 @@ int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
}
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
- unsigned vmid)
+ unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
uint64_t csa_mc_addr;
@@ -72,7 +72,7 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
int r;
/* don't enable OS preemption on SDMA under SRIOV */
- if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
if (ring->is_mes_queue) {
@@ -239,9 +239,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
sizeof(struct amdgpu_sdma_instance));
}
- if (amdgpu_sriov_vf(adev))
- return 0;
-
DRM_DEBUG("psp_load == '%s'\n",
adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
@@ -252,6 +249,13 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
if (!duplicate && (instance != i))
continue;
else {
+ /* Use a single copy per SDMA firmware type. PSP uses the same instance for all
+ * groups of SDMAs */
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2) &&
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
+ adev->sdma.num_inst_per_aid == i) {
+ break;
+ }
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma.instance[i].fw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index fc8528812598..513ac22120c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -26,7 +26,7 @@
#include "amdgpu_ras.h"
/* max number of IP instances */
-#define AMDGPU_MAX_SDMA_INSTANCES 8
+#define AMDGPU_MAX_SDMA_INSTANCES 16
enum amdgpu_sdma_irq {
AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
@@ -37,9 +37,19 @@ enum amdgpu_sdma_irq {
AMDGPU_SDMA_IRQ_INSTANCE5,
AMDGPU_SDMA_IRQ_INSTANCE6,
AMDGPU_SDMA_IRQ_INSTANCE7,
+ AMDGPU_SDMA_IRQ_INSTANCE8,
+ AMDGPU_SDMA_IRQ_INSTANCE9,
+ AMDGPU_SDMA_IRQ_INSTANCE10,
+ AMDGPU_SDMA_IRQ_INSTANCE11,
+ AMDGPU_SDMA_IRQ_INSTANCE12,
+ AMDGPU_SDMA_IRQ_INSTANCE13,
+ AMDGPU_SDMA_IRQ_INSTANCE14,
+ AMDGPU_SDMA_IRQ_INSTANCE15,
AMDGPU_SDMA_IRQ_LAST
};
+#define NUM_SDMA(x) hweight32(x)
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -49,6 +59,35 @@ struct amdgpu_sdma_instance {
struct amdgpu_ring ring;
struct amdgpu_ring page;
bool burst_nop;
+ uint32_t aid_id;
+};
+
+enum amdgpu_sdma_ras_memory_id {
+ AMDGPU_SDMA_MBANK_DATA_BUF0 = 1,
+ AMDGPU_SDMA_MBANK_DATA_BUF1 = 2,
+ AMDGPU_SDMA_MBANK_DATA_BUF2 = 3,
+ AMDGPU_SDMA_MBANK_DATA_BUF3 = 4,
+ AMDGPU_SDMA_MBANK_DATA_BUF4 = 5,
+ AMDGPU_SDMA_MBANK_DATA_BUF5 = 6,
+ AMDGPU_SDMA_MBANK_DATA_BUF6 = 7,
+ AMDGPU_SDMA_MBANK_DATA_BUF7 = 8,
+ AMDGPU_SDMA_MBANK_DATA_BUF8 = 9,
+ AMDGPU_SDMA_MBANK_DATA_BUF9 = 10,
+ AMDGPU_SDMA_MBANK_DATA_BUF10 = 11,
+ AMDGPU_SDMA_MBANK_DATA_BUF11 = 12,
+ AMDGPU_SDMA_MBANK_DATA_BUF12 = 13,
+ AMDGPU_SDMA_MBANK_DATA_BUF13 = 14,
+ AMDGPU_SDMA_MBANK_DATA_BUF14 = 15,
+ AMDGPU_SDMA_MBANK_DATA_BUF15 = 16,
+ AMDGPU_SDMA_UCODE_BUF = 17,
+ AMDGPU_SDMA_RB_CMD_BUF = 18,
+ AMDGPU_SDMA_IB_CMD_BUF = 19,
+ AMDGPU_SDMA_UTCL1_RD_FIFO = 20,
+ AMDGPU_SDMA_UTCL1_RDBST_FIFO = 21,
+ AMDGPU_SDMA_UTCL1_WR_FIFO = 22,
+ AMDGPU_SDMA_DATA_LUT_FIFO = 23,
+ AMDGPU_SDMA_SPLIT_DAT_BUF = 24,
+ AMDGPU_SDMA_MEMORY_BLOCK_LAST,
};
struct amdgpu_sdma_ras {
@@ -66,6 +105,8 @@ struct amdgpu_sdma {
struct amdgpu_irq_src srbm_write_irq;
int num_instances;
+ uint32_t sdma_mask;
+ int num_inst_per_aid;
uint32_t srbm_soft_reset;
bool has_page_queue;
struct ras_common_if *ras_if;
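
The new sdma_mask together with NUM_SDMA() describes which SDMA instances survived harvesting; NUM_SDMA() is simply a population count of the mask. A small example with a made-up mask:

/* Example of the sdma_mask / NUM_SDMA() bookkeeping. */
#include <stdint.h>
#include <stdio.h>

#define NUM_SDMA(x) __builtin_popcount(x)	/* stand-in for hweight32() */

int main(void)
{
	uint32_t sdma_mask = 0x0f0f;		/* hypothetical harvested config */
	int i;

	printf("%d SDMA instances present\n", NUM_SDMA(sdma_mask));
	for (i = 0; i < 16; i++)
		if (sdma_mask & (1u << i))
			printf("  instance %d active\n", i);
	return 0;
}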
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index c7a823f3f2c5..89c38d864471 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -30,6 +30,7 @@ struct amdgpu_smuio_funcs {
void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags);
u32 (*get_die_id)(struct amdgpu_device *adev);
u32 (*get_socket_id)(struct amdgpu_device *adev);
+ enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev);
bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 525dffbe046a..2fd1bfb35916 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -432,7 +432,7 @@ TRACE_EVENT(amdgpu_vm_flush,
),
TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
__get_str(ring), __entry->vmid,
- __entry->vm_hub,__entry->pd_addr)
+ __entry->vm_hub, __entry->pd_addr)
);
DECLARE_EVENT_CLASS(amdgpu_pasid,
@@ -494,7 +494,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
);
TRACE_EVENT(amdgpu_bo_move,
- TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+ TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(
__field(struct amdgpu_bo *, bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2cd081cbf706..4e51dce3aab5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -38,7 +38,6 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
-#include <linux/swiotlb.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>
@@ -50,7 +49,6 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/amdgpu_drm.h>
-#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
@@ -65,7 +63,7 @@
MODULE_IMPORT_NS(DMA_BUF);
-#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+#define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
@@ -128,6 +126,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GDS:
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
+ case AMDGPU_PL_DOORBELL:
placement->num_placement = 0;
placement->num_busy_placement = 0;
return;
@@ -184,11 +183,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct amdgpu_res_cursor *mm_cur,
- unsigned window, struct amdgpu_ring *ring,
+ unsigned int window, struct amdgpu_ring *ring,
bool tmz, uint64_t *size, uint64_t *addr)
{
struct amdgpu_device *adev = ring->adev;
- unsigned offset, num_pages, num_dw, num_bytes;
+ unsigned int offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr;
struct amdgpu_job *job;
void *cpu_addr;
@@ -229,7 +228,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
@@ -384,7 +383,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
+ r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
+ false);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -496,9 +496,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_mem->mem_type == AMDGPU_PL_GDS ||
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
+ old_mem->mem_type == AMDGPU_PL_DOORBELL ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
- new_mem->mem_type == AMDGPU_PL_OA) {
+ new_mem->mem_type == AMDGPU_PL_OA ||
+ new_mem->mem_type == AMDGPU_PL_DOORBELL) {
/* Nothing to save here */
ttm_bo_move_null(bo, new_mem);
goto out;
@@ -582,6 +584,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
break;
+ case AMDGPU_PL_DOORBELL:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->doorbell.base;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -596,6 +604,10 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
&cursor);
+
+ if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
+ return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -631,6 +643,7 @@ struct amdgpu_ttm_tt {
struct task_struct *usertask;
uint32_t userflags;
bool bound;
+ int32_t pool_id;
};
#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
@@ -800,6 +813,44 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
sg_free_table(ttm->sg);
}
+/*
+ * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
+ * MQDn+CtrlStackn where n is the number of XCCs per partition.
+ * pages_per_xcc is the size of one MQD+CtrlStack. The first page is the
+ * MQD and uses the default memory type, UC. The remaining pages_per_xcc - 1
+ * pages are the ctrl stack and have their memory type changed to NC.
+ */
+static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
+ struct ttm_tt *ttm, uint64_t flags)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ uint64_t total_pages = ttm->num_pages;
+ int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
+ uint64_t page_idx, pages_per_xcc;
+ int i;
+ uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
+
+ pages_per_xcc = total_pages;
+ do_div(pages_per_xcc, num_xcc);
+
+ for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
+ /* MQD page: use default flags */
+ amdgpu_gart_bind(adev,
+ gtt->offset + (page_idx << PAGE_SHIFT),
+ 1, &gtt->ttm.dma_address[page_idx], flags);
+ /*
+ * Ctrl pages - modify the memory type to NC (ctrl_flags) from
+ * the second page of the BO onward.
+ */
+ amdgpu_gart_bind(adev,
+ gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
+ pages_per_xcc - 1,
+ &gtt->ttm.dma_address[page_idx + 1],
+ ctrl_flags);
+ }
+}
+
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct ttm_buffer_object *tbo,
uint64_t flags)
@@ -812,21 +863,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
flags |= AMDGPU_PTE_TMZ;
if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
- uint64_t page_idx = 1;
-
- amdgpu_gart_bind(adev, gtt->offset, page_idx,
- gtt->ttm.dma_address, flags);
-
- /* The memory type of the first page defaults to UC. Now
- * modify the memory type to NC from the second page of
- * the BO onward.
- */
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
-
- amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
- ttm->num_pages - page_idx,
- &(gtt->ttm.dma_address[page_idx]), flags);
+ amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
} else {
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
gtt->ttm.dma_address, flags);
@@ -1029,15 +1066,20 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_ttm_tt *gtt;
enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL) {
+ if (!gtt)
return NULL;
- }
+
gtt->gobj = &bo->base;
+ if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
+ gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
+ else
+ gtt->pool_id = abo->xcp_id;
if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
caching = ttm_write_combined;
@@ -1064,6 +1106,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+ struct ttm_pool *pool;
pgoff_t i;
int ret;
@@ -1078,7 +1121,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return 0;
- ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ if (adev->mman.ttm_pools && gtt->pool_id >= 0)
+ pool = &adev->mman.ttm_pools[gtt->pool_id];
+ else
+ pool = &adev->mman.bdev.pool;
+ ret = ttm_pool_alloc(pool, ttm, ctx);
if (ret)
return ret;
@@ -1099,6 +1146,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
+ struct ttm_pool *pool;
pgoff_t i;
amdgpu_ttm_backend_unbind(bdev, ttm);
@@ -1117,7 +1165,13 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
ttm->pages[i]->mapping = NULL;
adev = amdgpu_ttm_adev(bdev);
- return ttm_pool_free(&adev->mman.bdev.pool, ttm);
+
+ if (adev->mman.ttm_pools && gtt->pool_id >= 0)
+ pool = &adev->mman.ttm_pools[gtt->pool_id];
+ else
+ pool = &adev->mman.bdev.pool;
+
+ return ttm_pool_free(pool, ttm);
}
/**
@@ -1263,6 +1317,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
flags |= AMDGPU_PTE_VALID;
if (mem && (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_DOORBELL ||
mem->mem_type == AMDGPU_PL_PREEMPT)) {
flags |= AMDGPU_PTE_SYSTEM;
@@ -1414,7 +1469,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
memcpy(adev->mman.sdma_access_ptr, buf, len);
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
&job);
@@ -1623,14 +1678,15 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
+static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
+ uint32_t reserve_size)
{
struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
memset(ctx, 0, sizeof(*ctx));
ctx->c2p_train_data_offset =
- ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
+ ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
ctx->p2c_train_data_offset =
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
@@ -1648,11 +1704,12 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
- int ret;
struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
bool mem_train_support = false;
+ uint32_t reserve_size = 0;
+ int ret;
- if (!amdgpu_sriov_vf(adev)) {
+ if (adev->bios && !amdgpu_sriov_vf(adev)) {
if (amdgpu_atomfirmware_mem_training_supported(adev))
mem_train_support = true;
else
@@ -1666,14 +1723,18 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
* Otherwise, fallback to legacy approach to check and reserve tmr block for ip
* discovery data and G6 memory training data respectively
*/
- adev->mman.discovery_tmr_size =
- amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
- if (!adev->mman.discovery_tmr_size)
- adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
+ if (adev->bios)
+ reserve_size =
+ amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
+
+ if (!adev->bios && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ reserve_size = max(reserve_size, (uint32_t)280 << 20);
+ else if (!reserve_size)
+ reserve_size = DISCOVERY_TMR_OFFSET;
if (mem_train_support) {
/* reserve vram for mem train according to TMR location */
- amdgpu_ttm_training_data_block_init(adev);
+ amdgpu_ttm_training_data_block_init(adev, reserve_size);
ret = amdgpu_bo_create_kernel_at(adev,
ctx->c2p_train_data_offset,
ctx->train_data_size,
@@ -1687,20 +1748,58 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
}
- ret = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
- adev->mman.discovery_tmr_size,
- &adev->mman.discovery_memory,
- NULL);
- if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
- return ret;
+ if (!adev->gmc.is_app_apu) {
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size,
+ reserve_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ DRM_ERROR("alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
+ NULL, NULL);
+ return ret;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
}
return 0;
}
+static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
+ return 0;
+
+ adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
+ sizeof(*adev->mman.ttm_pools),
+ GFP_KERNEL);
+ if (!adev->mman.ttm_pools)
+ return -ENOMEM;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
+ ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
+ adev->gmc.mem_partitions[i].numa.node,
+ false, false);
+ }
+ return 0;
+}
+
+static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
+ return;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++)
+ ttm_pool_fini(&adev->mman.ttm_pools[i]);
+
+ kfree(adev->mman.ttm_pools);
+ adev->mman.ttm_pools = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1727,6 +1826,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
+
+ r = amdgpu_ttm_pools_init(adev);
+ if (r) {
+ DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ return r;
+ }
adev->mman.initialized = true;
/* Initialize VRAM pool with all of VRAM divided into pages */
@@ -1744,6 +1849,9 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
adev->gmc.visible_vram_size);
+ else if (adev->gmc.is_app_apu)
+ DRM_DEBUG_DRIVER(
+ "No need to ioremap when real vram size is 0\n");
else
#endif
adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
@@ -1755,9 +1863,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*place on the VRAM, so reserve it early.
*/
r = amdgpu_ttm_fw_reserve_vram_init(adev);
- if (r) {
+ if (r)
return r;
- }
/*
*The reserved vram for driver must be pinned to the specified
@@ -1781,49 +1888,46 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
- * and driver. */
- r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
- &adev->mman.stolen_vga_memory,
- NULL);
- if (r)
- return r;
- r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
- adev->mman.stolen_extended_size,
- &adev->mman.stolen_extended_memory,
- NULL);
- if (r)
- return r;
- r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
- adev->mman.stolen_reserved_size,
- &adev->mman.stolen_reserved_memory,
- NULL);
- if (r)
- return r;
+ * and driver.
+ */
+ if (!adev->gmc.is_app_apu) {
+ r = amdgpu_bo_create_kernel_at(adev, 0,
+ adev->mman.stolen_vga_size,
+ &adev->mman.stolen_vga_memory,
+ NULL);
+ if (r)
+ return r;
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
- (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
-
- /* Compute GTT size, either based on 1/2 the size of RAM size
- * or whatever the user passed on module init */
- if (amdgpu_gtt_size == -1) {
- struct sysinfo si;
-
- si_meminfo(&si);
- /* Certain GL unit tests for large textures can cause problems
- * with the OOM killer since there is no way to link this memory
- * to a process. This was originally mitigated (but not necessarily
- * eliminated) by limiting the GTT size. The problem is this limit
- * is often too low for many modern games so just make the limit 1/2
- * of system memory which aligns with TTM. The OOM accounting needs
- * to be addressed, but we shouldn't prevent common 3D applications
- * from being usable just to potentially mitigate that corner case.
- */
- gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- (u64)si.totalram * si.mem_unit / 2);
+ r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
+ adev->mman.stolen_extended_size,
+ &adev->mman.stolen_extended_memory,
+ NULL);
+
+ if (r)
+ return r;
+
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->mman.stolen_reserved_offset,
+ adev->mman.stolen_reserved_size,
+ &adev->mman.stolen_reserved_memory,
+ NULL);
+ if (r)
+ return r;
} else {
- gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
+ DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
+
+ /* Compute GTT size, either based on TTM limit
+ * or whatever the user passed on module init.
+ */
+ if (amdgpu_gtt_size == -1)
+ gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
+ else
+ gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
@@ -1831,7 +1935,21 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
- (unsigned)(gtt_size / (1024 * 1024)));
+ (unsigned int)(gtt_size / (1024 * 1024)));
+
+	/* Initialize doorbell pool on PCI BAR */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
+ if (r) {
+ DRM_ERROR("Failed initializing doorbell heap.\n");
+ return r;
+ }
+
+	/* Create a doorbell page for kernel usage */
+ r = amdgpu_doorbell_create_kernel_doorbells(adev);
+ if (r) {
+ DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ return r;
+ }
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
@@ -1858,7 +1976,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("Failed initializing oa heap.\n");
return r;
}
-
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mman.sdma_access_bo, NULL,
@@ -1874,18 +1991,24 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
int idx;
+
if (!adev->mman.initialized)
return;
+ amdgpu_ttm_pools_fini(adev);
+
amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the stolen vga memory back to VRAM */
- amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
- /* return the IP Discovery TMR memory back to VRAM */
- amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
- if (adev->mman.stolen_reserved_size)
- amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
- NULL, NULL);
+ if (!adev->gmc.is_app_apu) {
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ /* return the FW reserved memory back to VRAM */
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ if (adev->mman.stolen_reserved_size)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
+ NULL, NULL);
+ }
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
amdgpu_ttm_fw_reserve_vram_fini(adev);
@@ -1927,7 +2050,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int r;
if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
- adev->mman.buffer_funcs_enabled == enable)
+ adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
return;
if (enable) {
@@ -1936,7 +2059,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
- r = drm_sched_entity_init(&adev->mman.entity,
+ r = drm_sched_entity_init(&adev->mman.high_pr,
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
@@ -1944,8 +2067,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
r);
return;
}
+
+ r = drm_sched_entity_init(&adev->mman.low_pr,
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+ r);
+ goto error_free_entity;
+ }
} else {
- drm_sched_entity_destroy(&adev->mman.entity);
+ drm_sched_entity_destroy(&adev->mman.high_pr);
+ drm_sched_entity_destroy(&adev->mman.low_pr);
dma_fence_put(man->move);
man->move = NULL;
}
@@ -1957,6 +2090,11 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
size = adev->gmc.visible_vram_size;
man->size = size;
adev->mman.buffer_funcs_enabled = enable;
+
+ return;
+
+error_free_entity:
+ drm_sched_entity_destroy(&adev->mman.high_pr);
}
static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
@@ -1964,14 +2102,16 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
unsigned int num_dw,
struct dma_resv *resv,
bool vm_needs_flush,
- struct amdgpu_job **job)
+ struct amdgpu_job **job,
+ bool delayed)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED;
int r;
-
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+ &adev->mman.high_pr;
+ r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, pool, job);
if (r)
@@ -1997,10 +2137,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
bool vm_needs_flush, bool tmz)
{
struct amdgpu_device *adev = ring->adev;
- unsigned num_loops, num_dw;
+ unsigned int num_loops, num_dw;
struct amdgpu_job *job;
uint32_t max_bytes;
- unsigned i;
+ unsigned int i;
int r;
if (!direct_submit && !ring->sched.ready) {
@@ -2012,7 +2152,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job);
+ resv, vm_needs_flush, &job, false);
if (r)
return r;
@@ -2048,7 +2188,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush)
+ bool vm_needs_flush, bool delayed)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2061,7 +2201,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job);
+ &job, delayed);
if (r)
return r;
@@ -2084,7 +2224,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
- struct dma_fence **f)
+ struct dma_fence **f,
+ bool delayed)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2113,7 +2254,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true);
+ &next, true, delayed);
if (r)
goto error;
@@ -2164,7 +2305,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct amdgpu_device *adev = m->private;
return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}
@@ -2278,7 +2419,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
struct page *p;
void *ptr;
- bytes = bytes < size ? bytes : size;
+ bytes = min(bytes, size);
/* Translate the bus address to a physical address. If
* the domain is NULL it means there is no IOMMU active
@@ -2333,7 +2474,7 @@ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
struct page *p;
void *ptr;
- bytes = bytes < size ? bytes : size;
+ bytes = min(bytes, size);
addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
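
For partitioned APUs the TT populate/unpopulate paths above pick a per-partition pool whenever the BO carries a valid pool_id, and otherwise fall back to the device-global pool. A userspace sketch of that selection logic, with example partition counts:

/* Sketch of the per-partition vs. global TTM pool selection above. */
#include <stdio.h>

#define GLOBAL_POOL (-1)

static int pick_pool(int num_mem_partitions, int pool_id)
{
	/* mirrors: adev->mman.ttm_pools && gtt->pool_id >= 0 */
	if (num_mem_partitions > 0 && pool_id >= 0)
		return pool_id;		/* &adev->mman.ttm_pools[pool_id] */
	return GLOBAL_POOL;		/* &adev->mman.bdev.pool */
}

int main(void)
{
	printf("partitioned APU, pool_id 2 -> pool %d\n", pick_pool(4, 2));
	printf("unpartitioned dGPU        -> pool %d\n", pick_pool(0, -1));
	return 0;
}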
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e2cd5894afc9..65ec82141a8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -33,12 +33,16 @@
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
+#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
#define AMDGPU_POISON 0xd0bed0be
+extern const struct attribute_group amdgpu_vram_mgr_attr_group;
+extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+
struct hmm_range;
struct amdgpu_gtt_mgr {
@@ -49,6 +53,7 @@ struct amdgpu_gtt_mgr {
struct amdgpu_mman {
struct ttm_device bdev;
+ struct ttm_pool *ttm_pools;
bool initialized;
void __iomem *aper_base_kaddr;
@@ -58,8 +63,10 @@ struct amdgpu_mman {
bool buffer_funcs_enabled;
struct mutex gtt_window_lock;
- /* Scheduler entity for buffer moves */
- struct drm_sched_entity entity;
+ /* High priority scheduler entity for buffer moves */
+ struct drm_sched_entity high_pr;
+ /* Low priority scheduler entity for VRAM clearing */
+ struct drm_sched_entity low_pr;
struct amdgpu_vram_mgr vram_mgr;
struct amdgpu_gtt_mgr gtt_mgr;
@@ -78,7 +85,8 @@ struct amdgpu_mman {
/* discovery */
uint8_t *discovery_bin;
uint32_t discovery_tmr_size;
- struct amdgpu_bo *discovery_memory;
+ /* fw reserved memory */
+ struct amdgpu_bo *fw_reserved_memory;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
@@ -150,7 +158,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
- struct dma_fence **fence);
+ struct dma_fence **fence,
+ bool delayed);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index f76b1cb8baf8..8beefc045e14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -703,6 +703,8 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -716,6 +718,7 @@ static struct attribute *fw_attrs[] = {
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
+ &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
NULL
};
@@ -748,7 +751,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
u8 *ucode_addr;
- if (NULL == ucode->fw)
+ if (!ucode->fw)
return 0;
ucode->mc_addr = mc_addr;
@@ -972,7 +975,7 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
uint8_t *src_addr = NULL;
uint8_t *dst_addr = NULL;
- if (NULL == ucode->fw)
+ if (!ucode->fw)
return 0;
comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
@@ -1043,6 +1046,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
+
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
adev->firmware.fw_buf_ptr + fw_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 1edf8e6aeb16..db0d94ca4ffc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -169,27 +169,31 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
- if (!amdgpu_sriov_vf(adev)) {
- if (!adev->gmc.xgmi.connected_to_cpu) {
- struct ras_err_data err_data = {0, 0, 0, NULL};
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
-
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
-
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
- }
- } else if (reset) {
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu) {
+ if (reset) {
/* MCA poison handler is only responsible for GPU reset,
* let MCA notifier do page retirement.
*/
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
amdgpu_ras_reset_gpu(adev);
}
+ return ret;
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
adev->virt.ops->ras_poison_handler(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 86133f77a9a4..43321f57f557 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -59,6 +59,8 @@ struct amdgpu_umc_ras {
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+ /* support different eeprom table version for different asic */
+ void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr);
};
struct amdgpu_umc_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
index 919d9d401750..107f9bb0e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
@@ -35,17 +35,51 @@ struct amdgpu_debugfs_regs2_iocdata {
} srbm;
};
+struct amdgpu_debugfs_regs2_iocdata_v2 {
+ __u32 use_srbm, use_grbm, pg_lock;
+ struct {
+ __u32 se, sh, instance;
+ } grbm;
+ struct {
+ __u32 me, pipe, queue, vmid;
+ } srbm;
+ u32 xcc_id;
+};
+
+struct amdgpu_debugfs_gprwave_iocdata {
+ u32 gpr_or_wave, se, sh, cu, wave, simd, xcc_id;
+ struct {
+ u32 thread, vpgr_or_sgpr;
+ } gpr;
+};
+
/*
* MMIO debugfs state data (per file* handle)
*/
struct amdgpu_debugfs_regs2_data {
struct amdgpu_device *adev;
struct mutex lock;
- struct amdgpu_debugfs_regs2_iocdata id;
+ struct amdgpu_debugfs_regs2_iocdata_v2 id;
+};
+
+struct amdgpu_debugfs_gprwave_data {
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ struct amdgpu_debugfs_gprwave_iocdata id;
};
enum AMDGPU_DEBUGFS_REGS2_CMDS {
AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0,
+ AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2,
+};
+
+enum AMDGPU_DEBUGFS_GPRWAVE_CMDS {
+ AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0,
};
+//reg2 interface
#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata)
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2 _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, struct amdgpu_debugfs_regs2_iocdata_v2)
+
+//gprwave interface
+#define AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE, struct amdgpu_debugfs_gprwave_iocdata)
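
A hedged userspace sketch of driving the new v2 state ioctl: mirror the iocdata_v2 layout locally, fill in the GRBM selector plus the new xcc_id, and issue SET_STATE_V2 on the regs2 debugfs file before reading registers through the same fd. The debugfs path and the local struct mirror below are assumptions for illustration, not part of this patch.

/* Userspace sketch; struct mirrors amdgpu_debugfs_regs2_iocdata_v2. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct regs2_iocdata_v2 {
	uint32_t use_srbm, use_grbm, pg_lock;
	struct { uint32_t se, sh, instance; } grbm;
	struct { uint32_t me, pipe, queue, vmid; } srbm;
	uint32_t xcc_id;
};

/* command 1 == AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2 */
#define REGS2_IOC_SET_STATE_V2 _IOWR(0x20, 1, struct regs2_iocdata_v2)

int main(void)
{
	struct regs2_iocdata_v2 id;
	/* assumed debugfs location of the regs2 file */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&id, 0, sizeof(id));
	id.use_grbm = 1;
	id.grbm.se = 0;
	id.grbm.sh = 0;
	id.grbm.instance = 0;
	id.xcc_id = 1;		/* target the second XCC on a partitioned part */

	if (ioctl(fd, REGS2_IOC_SET_STATE_V2, &id) < 0)
		perror("ioctl");

	close(fd);
	return 0;
}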
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 6887109abb13..b7441654e6fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -96,16 +96,16 @@
*/
struct amdgpu_uvd_cs_ctx {
struct amdgpu_cs_parser *parser;
- unsigned reg, count;
- unsigned data0, data1;
- unsigned idx;
+ unsigned int reg, count;
+ unsigned int data0, data1;
+ unsigned int idx;
struct amdgpu_ib *ib;
/* does the IB has a msg command */
bool has_msg_cmd;
/* minimum buffer sizes */
- unsigned *buf_sizes;
+ unsigned int *buf_sizes;
};
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -186,7 +186,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned family_id;
+ unsigned int family_id;
int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
@@ -275,7 +275,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
if (adev->asic_type < CHIP_VEGA20) {
- unsigned version_major, version_minor;
+ unsigned int version_major, version_minor;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
@@ -420,7 +420,7 @@ int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
- unsigned size;
+ unsigned int size;
void *ptr;
int i, j, idx;
bool in_ras_intr = amdgpu_ras_intr_triggered();
@@ -469,7 +469,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
- unsigned size;
+ unsigned int size;
void *ptr;
int i, idx;
@@ -491,7 +491,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
adev->uvd.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
- unsigned offset;
+ unsigned int offset;
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -542,6 +542,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
int i;
+
for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
@@ -579,7 +580,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
if (r) {
- DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+ DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
return r;
}
@@ -589,6 +590,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+
amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
@@ -609,21 +611,21 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
* Peek into the decode message and calculate the necessary buffer sizes.
*/
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
- unsigned buf_sizes[])
+ unsigned int buf_sizes[])
{
- unsigned stream_type = msg[4];
- unsigned width = msg[6];
- unsigned height = msg[7];
- unsigned dpb_size = msg[9];
- unsigned pitch = msg[28];
- unsigned level = msg[57];
+ unsigned int stream_type = msg[4];
+ unsigned int width = msg[6];
+ unsigned int height = msg[7];
+ unsigned int dpb_size = msg[9];
+ unsigned int pitch = msg[28];
+ unsigned int level = msg[57];
- unsigned width_in_mb = width / 16;
- unsigned height_in_mb = ALIGN(height / 16, 2);
- unsigned fs_in_mb = width_in_mb * height_in_mb;
+ unsigned int width_in_mb = width / 16;
+ unsigned int height_in_mb = ALIGN(height / 16, 2);
+ unsigned int fs_in_mb = width_in_mb * height_in_mb;
- unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
- unsigned min_ctx_size = ~0;
+ unsigned int image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned int min_ctx_size = ~0;
image_size = width * height;
image_size += image_size / 2;
@@ -631,7 +633,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
switch (stream_type) {
case 0: /* H264 */
- switch(level) {
+ switch (level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
break;
@@ -709,7 +711,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
break;
case 7: /* H264 Perf */
- switch(level) {
+ switch (level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
break;
@@ -742,7 +744,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
- if (!adev->uvd.use_ctx_buf){
+ if (!adev->uvd.use_ctx_buf) {
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -805,7 +807,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
* Make sure that we don't open up to many sessions.
*/
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
- struct amdgpu_bo *bo, unsigned offset)
+ struct amdgpu_bo *bo, unsigned int offset)
{
struct amdgpu_device *adev = ctx->parser->adev;
int32_t *msg, msg_type, handle;
@@ -911,7 +913,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
if (r) {
- DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+ DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
return r;
}
@@ -930,7 +932,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd < 0x4) {
if ((end - start) < ctx->buf_sizes[cmd]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
- (unsigned)(end - start),
+ (unsigned int)(end - start),
ctx->buf_sizes[cmd]);
return -EINVAL;
}
@@ -938,7 +940,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
} else if (cmd == 0x206) {
if ((end - start) < ctx->buf_sizes[4]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
- (unsigned)(end - start),
+ (unsigned int)(end - start),
ctx->buf_sizes[4]);
return -EINVAL;
}
@@ -949,14 +951,14 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
if (!ctx->parser->adev->uvd.address_64_bit) {
if ((start >> 28) != ((end - 1) >> 28)) {
- DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ DRM_ERROR("reloc %llx-%llx crossing 256MB boundary!\n",
start, end);
return -EINVAL;
}
if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
- DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+ DRM_ERROR("msg/fb buffer %llx-%llx out of 256MB segment!\n",
start, end);
return -EINVAL;
}
@@ -990,7 +992,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
ctx->idx++;
for (i = 0; i <= ctx->count; ++i) {
- unsigned reg = ctx->reg + i;
+ unsigned int reg = ctx->reg + i;
if (ctx->idx >= ctx->ib->length_dw) {
DRM_ERROR("Register command after end of CS!\n");
@@ -1036,7 +1038,8 @@ static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
for (ctx->idx = 0 ; ctx->idx < ctx->ib->length_dw; ) {
uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
- unsigned type = CP_PACKET_GET_TYPE(cmd);
+ unsigned int type = CP_PACKET_GET_TYPE(cmd);
+
switch (type) {
case PACKET_TYPE0:
ctx->reg = CP_PACKET0_GET_REG(cmd);
@@ -1070,7 +1073,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
struct amdgpu_ib *ib)
{
struct amdgpu_uvd_cs_ctx ctx = {};
- unsigned buf_sizes[] = {
+ unsigned int buf_sizes[] = {
[0x00000000] = 2048,
[0x00000001] = 0xFFFFFFFF,
[0x00000002] = 0xFFFFFFFF,
@@ -1185,8 +1188,9 @@ err_free:
}
/* multiple fence commands without any stream commands in between can
- crash the vcpu so just try to emmit a dummy create/destroy msg to
- avoid this */
+ * crash the vcpu so just try to emit a dummy create/destroy msg to
+ * avoid this
+ */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
@@ -1252,15 +1256,14 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work);
- unsigned fences = 0, i, j;
+ unsigned int fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
- for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
+ for (j = 0; j < adev->uvd.num_enc_rings; ++j)
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
- }
}
if (fences == 0) {
@@ -1356,7 +1359,7 @@ error:
*/
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned int i;
uint32_t used_handles = 0;
for (i = 0; i < adev->uvd.max_handles; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index e2b7324a70cb..1904edf68407 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -99,7 +99,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned ucode_version, version_major, version_minor, binary_id;
+ unsigned int ucode_version, version_major, version_minor, binary_id;
int i, r;
switch (adev->asic_type) {
@@ -207,7 +207,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
*/
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned int i;
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -286,7 +286,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
{
void *cpu_addr;
const struct common_firmware_header *hdr;
- unsigned offset;
+ unsigned int offset;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
@@ -332,7 +332,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vce.idle_work.work);
- unsigned i, count = 0;
+ unsigned int i, count = 0;
for (i = 0; i < adev->vce.num_rings; i++)
count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
@@ -409,6 +409,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
struct amdgpu_ring *ring = &adev->vce.ring[0];
int i, r;
+
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
uint32_t handle = atomic_read(&adev->vce.handles[i]);
@@ -436,7 +437,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
- const unsigned ib_size_dw = 1024;
+ const unsigned int ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct amdgpu_ib ib_msg;
@@ -528,7 +529,7 @@ err:
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence)
{
- const unsigned ib_size_dw = 1024;
+ const unsigned int ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
@@ -596,12 +597,12 @@ err:
*/
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
struct amdgpu_ib *ib, int lo, int hi,
- unsigned size, int32_t index)
+ unsigned int size, int32_t index)
{
int64_t offset = ((uint64_t)size) * ((int64_t)index);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *mapping;
- unsigned i, fpfn, lpfn;
+ unsigned int i, fpfn, lpfn;
struct amdgpu_bo *bo;
uint64_t addr;
int r;
@@ -619,7 +620,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
if (r) {
- DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+ DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
addr, lo, hi, size, index);
return r;
}
@@ -646,7 +647,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
* Patch relocation inside command stream with real buffer address
*/
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
- int lo, int hi, unsigned size, uint32_t index)
+ int lo, int hi, unsigned int size, uint32_t index)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
@@ -662,14 +663,14 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
if (r) {
- DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+ DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
addr, lo, hi, size, index);
return r;
}
if ((addr + (uint64_t)size) >
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
+ DRM_ERROR("BO too small for addr 0x%010llx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
@@ -692,12 +693,12 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
* @allocated: allocated a new handle?
*
 * Validates the handle and returns the found session index or -EINVAL
- * we we don't have another free session index.
+ * if we don't have another free session index.
*/
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
uint32_t handle, uint32_t *allocated)
{
- unsigned i;
+ unsigned int i;
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
@@ -735,14 +736,14 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- unsigned fb_idx = 0, bs_idx = 0;
+ unsigned int fb_idx = 0, bs_idx = 0;
int session_idx = -1;
uint32_t destroyed = 0;
uint32_t created = 0;
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- unsigned idx;
+ unsigned int idx;
int i, r = 0;
job->vm = NULL;
@@ -1084,7 +1085,7 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
*
*/
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
+ unsigned int flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@@ -1106,7 +1107,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr;
- unsigned i;
+ unsigned int i;
int r, timeout = adev->usec_timeout;
/* skip ring test for sriov*/
@@ -1171,7 +1172,7 @@ error:
enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
- switch(ring) {
+ switch (ring) {
case 0:
return AMDGPU_RING_PRIO_0;
case 1:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e63fcc58e8e0..36b55d2bd51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -56,6 +56,7 @@
#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
+#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -77,6 +78,7 @@ MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -167,7 +169,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)){
+ if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
} else {
@@ -233,11 +235,11 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
- if (adev->vcn.indirect_sram) {
- amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
- &adev->vcn.inst[j].dpg_sram_gpu_addr,
- (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
- }
+ amdgpu_bo_free_kernel(
+ &adev->vcn.inst[j].dpg_sram_bo,
+ &adev->vcn.inst[j].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
+
kvfree(adev->vcn.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
@@ -274,20 +276,19 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
bool ret = false;
int vcn_config = adev->vcn.vcn_config[vcn_instance];
- if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
+ if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
ret = true;
- } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
+ else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
ret = true;
- } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
+ else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
ret = true;
- }
return ret;
}
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
- unsigned size;
+ unsigned int size;
void *ptr;
int i, idx;
@@ -316,7 +317,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
- unsigned size;
+ unsigned int size;
void *ptr;
int i, idx;
@@ -338,7 +339,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
adev->vcn.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
- unsigned offset;
+ unsigned int offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
@@ -369,9 +370,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (adev->vcn.harvest_config & (1 << j))
continue;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
- }
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
@@ -458,7 +458,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
- unsigned i;
+ unsigned int i;
int r;
/* VCN in SRIOV does not support direct register read/write */
@@ -795,7 +795,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr;
- unsigned i;
+ unsigned int i;
int r;
if (amdgpu_sriov_vf(adev))
@@ -993,11 +993,14 @@ error:
int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
long r;
- r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
- if (r)
- goto error;
+ if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(4, 0, 3)) {
+ r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
+ if (r)
+ goto error;
+ }
r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
@@ -1007,7 +1010,7 @@ error:
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
- switch(ring) {
+ switch (ring) {
case 0:
return AMDGPU_RING_PRIO_0;
case 1:
@@ -1026,6 +1029,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
+
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -1041,6 +1045,9 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
adev->firmware.ucode[idx].fw = adev->vcn.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(4, 0, 3))
+ break;
}
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}
@@ -1051,7 +1058,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+ size_t size, loff_t *pos)
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
@@ -1097,7 +1104,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
read_pos = plog->header_size;
if (read_num[i] == copy_to_user((buf + read_bytes),
- (log_buf + read_pos), read_num[i]))
+ (log_buf + read_pos), read_num[i]))
return -EFAULT;
read_bytes += read_num[i];
@@ -1118,7 +1125,7 @@ static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
#endif
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
- struct amdgpu_vcn_inst *vcn)
+ struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
@@ -1126,7 +1133,7 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
char name[32];
sprintf(name, "amdgpu_vcn_%d_fwlog", i);
- debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
+ debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
&amdgpu_debugfs_vcnfwlog_fops,
AMDGPU_VCNFW_LOG_SIZE);
#endif
@@ -1140,7 +1147,7 @@ void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
- + vcn->fw_shared.log_offset;
+ + vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
@@ -1181,6 +1188,32 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i) ||
+ !adev->vcn.inst[i].ras_poison_irq.funcs)
+ continue;
+
+ r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ }
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
int err;
@@ -1202,7 +1235,22 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
adev->vcn.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
- ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
return 0;
}
+
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id)
+{
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = (ucode_id ? ucode_id :
+ (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
+ AMDGPU_UCODE_ID_VCN0_RAM)),
+ .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+ .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
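
Note on the hunk above: amdgpu_vcn_psp_update_sram() centralizes handing the staged DPG SRAM image to PSP. A minimal usage sketch follows, assuming a VCN start path that has already queued its indirect register writes; example_vcn_dpg_commit() is a hypothetical wrapper, only the helper itself and WREG32_SOC15_DPG_MODE come from this series.

/* Sketch only: queue indirect DPG writes, then let PSP load the image.
 * Passing 0 as ucode_id selects VCN0_RAM/VCN1_RAM based on the instance.
 */
static int example_vcn_dpg_commit(struct amdgpu_device *adev, int inst_idx)
{
	/* ... WREG32_SOC15_DPG_MODE(inst_idx, offset, value, 0, 1) calls ... */
	return amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
}
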
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c730949ece7d..a3eed90b6af0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -32,7 +32,7 @@
#define AMDGPU_VCN_FIRMWARE_OFFSET 256
#define AMDGPU_VCN_MAX_ENC_RINGS 3
-#define AMDGPU_MAX_VCN_INSTANCES 2
+#define AMDGPU_MAX_VCN_INSTANCES 4
#define AMDGPU_MAX_VCN_ENC_RINGS AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES
#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
@@ -141,21 +141,27 @@
RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
})
-#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
- do { \
- if (!indirect) { \
- WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \
- WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
- (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
- mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
- offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
- } else { \
- *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
- *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
- } \
+#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
+ mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ VCN, GET_INST(VCN, inst_idx), \
+ mmUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
} while (0)
#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
+#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
@@ -175,6 +181,8 @@
#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -234,6 +242,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
atomic_t sched_score;
struct amdgpu_irq_src irq;
+ struct amdgpu_irq_src ras_poison_irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
struct dpg_pause_state pause_state;
@@ -242,6 +251,7 @@ struct amdgpu_vcn_inst {
uint32_t *dpg_sram_curr_addr;
atomic_t dpg_enc_submission_cnt;
struct amdgpu_vcn_fw_shared fw_shared;
+ uint8_t aid_id;
};
struct amdgpu_vcn_ras {
@@ -271,6 +281,9 @@ struct amdgpu_vcn {
struct ras_common_if *ras_if;
struct amdgpu_vcn_ras *ras;
+
+ uint16_t inst_mask;
+ uint8_t num_inst_per_aid;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -333,6 +346,11 @@ struct amdgpu_fw_shared_rb_setup {
uint32_t reserved[6];
};
+struct amdgpu_fw_shared_drm_key_wa {
+ uint8_t method;
+ uint8_t reserved[3];
+};
+
struct amdgpu_vcn4_fw_shared {
uint32_t present_flag_0;
uint8_t pad[12];
@@ -342,6 +360,7 @@ struct amdgpu_vcn4_fw_shared {
uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+ struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
};
struct amdgpu_vcn_fwlog {
@@ -400,6 +419,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id);
+
#endif
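
Note on the header changes above: the VCN4 fw_shared area gains a DRM key injection workaround descriptor. The sketch below shows how a VCN version file might advertise it to firmware; example_enable_drm_key_wa() is hypothetical, while the field and flag names are taken from the hunk.

/* Sketch only: advertise the ASD-handshaking key-injection workaround
 * to VCN firmware through the shared memory flags.
 */
static void example_enable_drm_key_wa(volatile struct amdgpu_vcn4_fw_shared *fw_shared)
{
	fw_shared->present_flag_0 |=
		cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT);
	fw_shared->drm_key_wa.method =
		AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
}
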
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2e2cbaa7fde..96857ae7fb5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -56,7 +56,8 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
/* enable virtual display */
if (adev->asic_type != CHIP_ALDEBARAN &&
- adev->asic_type != CHIP_ARCTURUS) {
+ adev->asic_type != CHIP_ARCTURUS &&
+ ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
if (adev->mode_info.num_crtc == 0)
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
@@ -65,16 +66,16 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->cg_flags = 0;
adev->pg_flags = 0;
- /* enable mcbp for sriov asic_type before soc21 */
- amdgpu_mcbp = (adev->asic_type < CHIP_IP_DISCOVERY) ? 1 : 0;
-
+ /* Reduce kcq number to 2 to reduce latency */
+ if (amdgpu_num_kcq == -1)
+ amdgpu_num_kcq = 2;
}
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
signed long r, cnt = 0;
unsigned long flags;
@@ -519,7 +520,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
}
- if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
+ if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
adev->virt.is_mm_bw_enabled = true;
adev->unique_id =
@@ -557,7 +558,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU, adev->gfx.imu_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
adev->psp.asd_context.bin_desc.fw_version);
@@ -835,6 +835,16 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+void amdgpu_virt_post_reset(struct amdgpu_device *adev)
+{
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
+ /* force GFXOFF state after reset,
+ * to avoid invalid operations before GC is enabled
+ */
+ adev->gfx.is_poweron = false;
+ }
+}
+
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
switch (adev->ip_versions[MP0_HWIP][0]) {
@@ -845,6 +855,17 @@ bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_i
return false;
else
return true;
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
+ if (ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
case IP_VERSION(13, 0, 10):
/* white list */
if (ucode_id == AMDGPU_UCODE_ID_CAP
@@ -954,7 +975,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
return ret;
}
-static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
{
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
uint32_t timeout = 50000;
@@ -972,7 +993,12 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
return 0;
}
- reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
+ dev_err(adev->dev, "invalid xcc\n");
+ return 0;
+ }
+
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
@@ -1037,13 +1063,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
u32 offset, u32 value,
- u32 acc_flags, u32 hwip)
+ u32 acc_flags, u32 hwip, u32 xcc_id)
{
u32 rlcg_flag;
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
- amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+ amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
return;
}
@@ -1054,13 +1080,13 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
}
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
- u32 offset, u32 acc_flags, u32 hwip)
+ u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
{
u32 rlcg_flag;
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
- return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+ return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
if (acc_flags & AMDGPU_REGS_NO_KIQ)
return RREG32_NO_KIQ(offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 4f7bab52282a..fabb83e9d9ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -355,9 +355,10 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
u32 offset, u32 value,
- u32 acc_flags, u32 hwip);
+ u32 acc_flags, u32 hwip, u32 xcc_id);
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
- u32 offset, u32 acc_flags, u32 hwip);
+ u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
uint32_t ucode_id);
+void amdgpu_virt_post_reset(struct amdgpu_device *adev);
#endif
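
Note on the prototype changes above: amdgpu_sriov_wreg()/amdgpu_sriov_rreg() now take an xcc_id so RLCG-assisted register access can target a specific XCC on multi-XCC parts. A minimal caller sketch, assuming the pre-existing AMDGPU_REGS_RLC access flag and GC_HWIP block id; example_sriov_write() itself is illustrative.

/* Sketch only: SRIOV register write routed through the scratch registers
 * of one XCC; xcc_id is validated against adev->gfx.xcc_mask internally.
 */
static void example_sriov_write(struct amdgpu_device *adev, u32 reg,
				u32 value, u32 xcc_id)
{
	amdgpu_sriov_wreg(adev, reg, value, AMDGPU_REGS_RLC, GC_HWIP, xcc_id);
}
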
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 53ff91fc6cf6..7148a216ae2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
DRM_WARN("%s: vblank timer overrun\n", __func__);
ret = drm_crtc_handle_vblank(crtc);
+ /* Don't queue timer again when vblank is disabled. */
if (!ret)
- DRM_ERROR("amdgpu_vkms failure on handling vblank");
+ return HRTIMER_NORESTART;
return HRTIMER_RESTART;
}
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- hrtimer_cancel(&amdgpu_crtc->vblank_timer);
+ hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}
static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
@@ -500,8 +501,6 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3c0310576b3b..f5daadcec865 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,6 +34,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
@@ -111,9 +112,9 @@ struct amdgpu_prt_cb {
};
/**
- * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence
+ * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
*/
-struct amdgpu_vm_tlb_seq_cb {
+struct amdgpu_vm_tlb_seq_struct {
/**
* @vm: pointer to the amdgpu_vm structure to set the fence sequence on
*/
@@ -267,6 +268,32 @@ static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
+ * @vm: the VM which state machine to reset
+ *
+ * Move all vm_bo object in the VM into a state where they will be updated
+ * again during validation.
+ */
+static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_bo_base *vm_bo, *tmp;
+
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->done, &vm->invalidated);
+ list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
+ vm_bo->moved = true;
+ list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
+ struct amdgpu_bo *bo = vm_bo->bo;
+
+ if (!bo || bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ else if (bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ }
+ spin_unlock(&vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
* @base: base structure for tracking BO usage in a VM
@@ -313,25 +340,20 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
}
/**
- * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ * amdgpu_vm_lock_pd - lock PD in drm_exec
*
* @vm: vm providing the BOs
- * @validated: head of validation list
- * @entry: entry to add
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
*
- * Add the page directory to the list of BOs to
- * validate for command submission.
+ * Lock the VM root PD in the DRM execution context.
*/
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry)
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
{
- entry->priority = 0;
- entry->tv.bo = &vm->root.bo->tbo;
- /* Two for VM updates, one for TTM and one for the CS job */
- entry->tv.num_shared = 4;
- entry->user_pages = NULL;
- list_add(&entry->tv.head, validated);
+ /* We need at least two fences for the VM PD/PT updates */
+ return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
+ 2 + num_fences);
}
/**
@@ -351,6 +373,58 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
spin_unlock(&adev->mman.bdev.lru_lock);
}
+/* Create scheduler entities for page table updates */
+static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ int r;
+
+ r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
+ if (r)
+ goto error;
+
+ return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
+
+error:
+ drm_sched_entity_destroy(&vm->immediate);
+ return r;
+}
+
+/* Destroy the entities for page table updates again */
+static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
+{
+ drm_sched_entity_destroy(&vm->immediate);
+ drm_sched_entity_destroy(&vm->delayed);
+}
+
+/**
+ * amdgpu_vm_generation - return the page table re-generation counter
+ * @adev: the amdgpu_device
+ * @vm: optional VM to check, might be NULL
+ *
+ * Returns a page table re-generation token to allow checking if submissions
+ * are still valid to use this VM. The VM parameter might be NULL in which case
+ * just the VRAM lost counter will be used.
+ */
+uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
+
+ if (!vm)
+ return result;
+
+ result += vm->generation;
+ /* Add one if the page tables will be re-generated on next CS */
+ if (drm_sched_entity_error(&vm->delayed))
+ ++result;
+
+ return result;
+}
+
/**
* amdgpu_vm_validate_pt_bos - validate the page table BOs
*
@@ -373,6 +447,15 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo;
int r;
+ if (drm_sched_entity_error(&vm->delayed)) {
+ ++vm->generation;
+ amdgpu_vm_bo_reset_state_machine(vm);
+ amdgpu_vm_fini_entities(vm);
+ r = amdgpu_vm_init_entities(adev, vm);
+ if (r)
+ return r;
+ }
+
spin_lock(&vm->status_lock);
while (!list_empty(&vm->evicted)) {
bo_base = list_first_entry(&vm->evicted,
@@ -746,7 +829,7 @@ error:
static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
- struct amdgpu_vm_tlb_seq_cb *tlb_cb;
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb;
tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
atomic64_inc(&tlb_cb->vm->tlb_seq);
@@ -784,7 +867,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_vm_tlb_seq_cb *tlb_cb;
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb;
struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
int r, idx;
@@ -920,42 +1003,51 @@ error_unlock:
return r;
}
+static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
+ struct amdgpu_mem_stats *stats)
+{
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+
+ if (!bo)
+ return;
+
+ /*
+ * For now ignore BOs which are currently locked and potentially
+ * changing their location.
+ */
+ if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+ !dma_resv_trylock(bo->tbo.base.resv))
+ return;
+
+ amdgpu_bo_get_memory(bo, stats);
+ if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+ dma_resv_unlock(bo->tbo.base.resv);
+}
+
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats *stats)
{
struct amdgpu_bo_va *bo_va, *tmp;
spin_lock(&vm->status_lock);
- list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
- list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
- if (!bo_va->base.bo)
- continue;
- amdgpu_bo_get_memory(bo_va->base.bo, stats);
- }
+ list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
+ amdgpu_vm_bo_get_memory(bo_va, stats);
spin_unlock(&vm->status_lock);
}
@@ -1358,6 +1450,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
bo_va->ref_count = 1;
+ bo_va->last_pt_update = dma_fence_get_stub();
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
@@ -1433,14 +1526,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr;
/* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
- size == 0 || size & ~PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+ return -EINVAL;
+ if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
- if (saddr >= eaddr ||
- (bo && offset + size > amdgpu_bo_size(bo)) ||
+ if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
@@ -1499,14 +1592,14 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r;
/* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
- size == 0 || size & ~PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
+ return -EINVAL;
+ if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
- if (saddr >= eaddr ||
- (bo && offset + size > amdgpu_bo_size(bo)) ||
+ if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
@@ -1674,18 +1767,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
+ struct amdgpu_bo *bo = before->bo_va->base.bo;
+
amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+ !before->bo_va->base.moved)
+ amdgpu_vm_bo_moved(&before->bo_va->base);
} else {
kfree(before);
}
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
+ struct amdgpu_bo *bo = after->bo_va->base.bo;
+
amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+ !after->bo_va->base.moved)
+ amdgpu_vm_bo_moved(&after->bo_va->base);
} else {
kfree(after);
}
@@ -2012,13 +2117,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @xcp_id: GPU partition selection id
*
* Init @vm fields.
*
* Returns:
* 0 for success, error for failure.
*/
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2038,19 +2144,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->pt_freed);
INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
- /* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
- adev->vm_manager.vm_pte_scheds,
- adev->vm_manager.vm_pte_num_scheds, NULL);
+ r = amdgpu_vm_init_entities(adev, vm);
if (r)
return r;
- r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
- adev->vm_manager.vm_pte_scheds,
- adev->vm_manager.vm_pte_num_scheds, NULL);
- if (r)
- goto error_free_immediate;
-
vm->pte_support_ats = false;
vm->is_compute_context = false;
@@ -2067,15 +2164,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->update_funcs = &amdgpu_vm_cpu_funcs;
else
vm->update_funcs = &amdgpu_vm_sdma_funcs;
- vm->last_update = NULL;
+
+ vm->last_update = dma_fence_get_stub();
vm->last_unlocked = dma_fence_get_stub();
vm->last_tlb_flush = dma_fence_get_stub();
+ vm->generation = 0;
mutex_init(&vm->eviction_lock);
vm->evicting = false;
r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
- false, &root);
+ false, &root, xcp_id);
if (r)
goto error_free_delayed;
root_bo = &root->bo;
@@ -2110,10 +2209,7 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
- drm_sched_entity_destroy(&vm->delayed);
-
-error_free_immediate:
- drm_sched_entity_destroy(&vm->immediate);
+ amdgpu_vm_fini_entities(vm);
return r;
}
@@ -2146,16 +2242,16 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
return r;
- /* Sanity checks */
- if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
- r = -EINVAL;
- goto unreserve_bo;
- }
-
/* Check if PD needs to be reinitialized and do it before
* changing any other state, in case it fails.
*/
if (pte_support_ats != vm->pte_support_ats) {
+ /* Sanity checks */
+ if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
+ r = -EINVAL;
+ goto unreserve_bo;
+ }
+
vm->pte_support_ats = pte_support_ats;
r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
false);
@@ -2180,19 +2276,16 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
goto unreserve_bo;
vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ r = amdgpu_vm_pt_map_tables(adev, vm);
+ if (r)
+ goto unreserve_bo;
+
} else {
vm->update_funcs = &amdgpu_vm_sdma_funcs;
}
- /*
- * Make sure root PD gets mapped. As vm_update_mode could be changed
- * when turning a GFX VM into a compute VM.
- */
- r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo));
- if (r)
- goto unreserve_bo;
dma_fence_put(vm->last_update);
- vm->last_update = NULL;
+ vm->last_update = dma_fence_get_stub();
vm->is_compute_context = true;
/* Free the shadow bo for compute VM */
@@ -2266,8 +2359,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&root);
WARN_ON(vm->root.bo);
- drm_sched_entity_destroy(&vm->immediate);
- drm_sched_entity_destroy(&vm->delayed);
+ amdgpu_vm_fini_entities(vm);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2282,8 +2374,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
dma_fence_put(vm->last_update);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
- amdgpu_vmid_free_reserved(adev, vm, i);
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
+ if (vm->reserved_vmid[i]) {
+ amdgpu_vmid_free_reserved(adev, i);
+ vm->reserved_vmid[i] = false;
+ }
+ }
+
}
/**
@@ -2366,18 +2464,25 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- int r;
+
+ /* No valid flags defined yet */
+ if (args->in.flags)
+ return -EINVAL;
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
- AMDGPU_GFXHUB_0);
- if (r)
- return r;
+ if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
+ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
+ fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
+ }
+
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
+ if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
+ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
+ fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
+ }
break;
default:
return -EINVAL;
@@ -2432,6 +2537,9 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* amdgpu_vm_handle_fault - graceful handling of VM faults.
* @adev: amdgpu device pointer
* @pasid: PASID of the VM
+ * @vmid: VMID, only used for GFX 9.4.3.
+ * @node_id: Node_id received in IH cookie. Only applicable for
+ * GFX 9.4.3.
* @addr: Address of the fault
* @write_fault: true is write fault, false is read fault
*
@@ -2439,7 +2547,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* shouldn't be reported any more.
*/
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- uint64_t addr, bool write_fault)
+ u32 vmid, u32 node_id, uint64_t addr,
+ bool write_fault)
{
bool is_compute_context = false;
struct amdgpu_bo *root;
@@ -2463,8 +2572,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
addr /= AMDGPU_GPU_PAGE_SIZE;
- if (is_compute_context &&
- !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
+ if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
+ node_id, addr, write_fault)) {
amdgpu_bo_unref(&root);
return true;
}
@@ -2489,7 +2598,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
- flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
+ flags = AMDGPU_VM_NORETRY_FLAGS;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
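
Note on the amdgpu_vm.c changes above: amdgpu_vm_generation() returns a token that changes whenever VRAM is lost or the page tables must be rebuilt after a scheduler entity error. A hedged sketch of the intended check; example_vm_still_valid() is hypothetical, the helper itself comes from this patch.

/* Sketch only: snapshot the token before building a job and compare it
 * at submission time; a mismatch means the page tables were re-created.
 */
static bool example_vm_still_valid(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm, uint64_t token)
{
	return amdgpu_vm_generation(adev, vm) == token;
}
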
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 6f085f0b4ef3..204ab13184ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -36,6 +36,8 @@
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
+struct drm_exec;
+
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
@@ -84,7 +86,13 @@ struct amdgpu_mem_stats;
/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
+/* Flag combination to set no-retry with TF disabled */
+#define AMDGPU_VM_NORETRY_FLAGS (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
+ AMDGPU_PTE_TF)
+/* Flag combination to set no-retry with TF enabled */
+#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
+ AMDGPU_PTE_PRT)
/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a) ((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10(3ULL)
@@ -111,11 +119,14 @@ struct amdgpu_mem_stats;
/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)
-/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS 3
-#define AMDGPU_GFXHUB_0 0
-#define AMDGPU_MMHUB_0 1
-#define AMDGPU_MMHUB_1 2
+/*
+ * max number of VMHUB
+ * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
+ */
+#define AMDGPU_MAX_VMHUBS 13
+#define AMDGPU_GFXHUB(x) (x)
+#define AMDGPU_MMHUB0(x) (8 + x)
+#define AMDGPU_MMHUB1(x) (8 + 4 + x)
/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
@@ -292,6 +303,9 @@ struct amdgpu_vm {
atomic64_t tlb_seq;
struct dma_fence *last_tlb_flush;
+ /* How many times we had to re-generate the page tables */
+ uint64_t generation;
+
/* Last unlocked submission to the scheduler entities */
struct dma_fence *last_unlocked;
@@ -326,6 +340,9 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* Flag to indicate if VM is used for compute */
bool is_compute_context;
+
+ /* Memory partition number, -1 means any partition */
+ int8_t mem_id;
};
struct amdgpu_vm_manager {
@@ -383,14 +400,14 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
u32 pasid);
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
+uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
@@ -452,7 +469,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
- uint64_t addr, bool write_fault);
+ u32 vmid, u32 node_id, uint64_t addr,
+ bool write_fault);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
@@ -464,7 +482,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+ int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+ int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
@@ -480,6 +499,8 @@ void amdgpu_vm_pt_free_work(struct work_struct *work);
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif
+int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+
/**
* amdgpu_vm_tlb_seq - return tlb flush sequence number
* @vm: the amdgpu_vm structure to query
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 31913ae86de6..6e31621452de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -31,6 +31,7 @@
*/
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
+ table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return amdgpu_bo_kmap(&table->bo, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index df63dc3bca18..96d601e209b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -498,9 +498,11 @@ exit:
* @level: the page table level
 * @immediate: use an immediate update
* @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
*/
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+ int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+ int32_t xcp_id)
{
struct amdgpu_bo_param bp;
struct amdgpu_bo *bo;
@@ -512,7 +514,12 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp.size = amdgpu_vm_pt_size(adev, level);
bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ if (!adev->gmc.is_app_apu)
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ else
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+
bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -529,6 +536,8 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp.type = ttm_bo_type_kernel;
bp.no_wait_gpu = immediate;
+ bp.xcp_id_plus1 = xcp_id + 1;
+
if (vm->root.bo)
bp.resv = vm->root.bo->tbo.base.resv;
@@ -553,6 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.base.resv;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+ bp.xcp_id_plus1 = xcp_id + 1;
r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
@@ -564,7 +574,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
amdgpu_bo_add_to_shadow_list(*vmbo);
return 0;
@@ -598,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
return 0;
amdgpu_vm_eviction_unlock(vm);
- r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+ r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+ vm->root.bo->xcp_id);
amdgpu_vm_eviction_lock(vm);
if (r)
return r;
@@ -770,6 +780,27 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
1, 0, flags);
}
+/**
+ * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: pointer to PTE flags
+ *
+ * Update PTE no-retry flags when TF is enabled.
+ */
+static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
+ uint64_t *flags)
+{
+ /*
+ * Update no-retry flags with the corresponding TF
+ * no-retry combination.
+ */
+ if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
+ *flags &= ~AMDGPU_VM_NORETRY_FLAGS;
+ *flags |= adev->gmc.noretry_flags;
+ }
+}
+
/*
* amdgpu_vm_pte_update_flags - figure out flags for PTE updates
*
@@ -781,13 +812,14 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
uint64_t pe, uint64_t addr,
unsigned int count, uint32_t incr,
uint64_t flags)
-
{
+ struct amdgpu_device *adev = params->adev;
+
if (level != AMDGPU_VM_PTB) {
flags |= AMDGPU_PDE_PTE;
- amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
+ amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
- } else if (params->adev->asic_type >= CHIP_VEGA10 &&
+ } else if (adev->asic_type >= CHIP_VEGA10 &&
!(flags & AMDGPU_PTE_VALID) &&
!(flags & AMDGPU_PTE_PRT)) {
@@ -795,6 +827,31 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
flags |= AMDGPU_PTE_EXECUTABLE;
}
+ /*
+ * Update no-retry flags to use the no-retry flag combination
+ * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS combination
+ * does not work when TF is enabled, so replace it with the
+ * AMDGPU_VM_NORETRY_FLAGS_TF combination, which works in
+ * all cases.
+ */
+ if (level == AMDGPU_VM_PTB)
+ amdgpu_vm_pte_update_noretry_flags(adev, &flags);
+
+ /* APUs mapping system memory may need different MTYPEs on different
+ * NUMA nodes. Only do this for contiguous ranges that can be assumed
+ * to be on the same NUMA node.
+ */
+ if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
+ adev->gmc.gmc_funcs->override_vm_pte_flags &&
+ num_possible_nodes() > 1) {
+ if (!params->pages_addr)
+ amdgpu_gmc_override_vm_pte_flags(adev, params->vm,
+ addr, &flags);
+ else
+ dev_dbg(adev->dev,
+ "override_vm_pte_flags skipped: non-contiguous\n");
+ }
+
params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
flags);
}
@@ -1020,3 +1077,31 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
return 0;
}
+
+/**
+ * amdgpu_vm_pt_map_tables - make the BO of the root PD CPU accessible
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ *
+ * Make the root page directory and everything below it CPU accessible.
+ */
+int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_bo_base *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
+
+ struct amdgpu_bo_vm *bo;
+ int r;
+
+ if (entry->bo) {
+ bo = to_amdgpu_bo_vm(entry->bo);
+ r = vm->update_funcs->map_table(bo);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
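
Note on the hunks above: page table BOs now carry a partition id via bp.xcp_id_plus1, a field added elsewhere in this series. The +1 encoding keeps 0 meaning "no partition preference", so a VM created with xcp_id == -1 still works; example_set_partition() below is purely illustrative.

/* Sketch only: "partition N" is stored as N + 1 so that 0 keeps meaning
 * "any partition"; a VM created with xcp_id == -1 maps to 0.
 */
static void example_set_partition(struct amdgpu_bo_param *bp, int32_t xcp_id)
{
	bp->xcp_id_plus1 = xcp_id + 1;
}
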
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 43d6a9d6a538..c7085a747b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -370,6 +370,45 @@ out:
return ret;
}
+static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
+}
+
+static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
+ return false;
+}
+
+static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
+ return true;
+}
+
+static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
+}
+
+static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
+ return -ENOSPC;
+}
+
/**
* amdgpu_vram_mgr_new - allocate new ranges
*
@@ -800,7 +839,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct drm_buddy *mm = &mgr->mm;
- struct drm_buddy_block *block;
+ struct amdgpu_vram_reservation *rsv;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
@@ -812,11 +851,20 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
drm_buddy_print(mm, printer);
drm_printf(printer, "reserved:\n");
- list_for_each_entry(block, &mgr->reserved_pages, link)
- drm_buddy_block_print(mm, block, printer);
+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+ drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+ rsv->start, rsv->start + rsv->size, rsv->size);
mutex_unlock(&mgr->lock);
}
+static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
+ .alloc = amdgpu_dummy_vram_mgr_new,
+ .free = amdgpu_dummy_vram_mgr_del,
+ .intersects = amdgpu_dummy_vram_mgr_intersects,
+ .compatible = amdgpu_dummy_vram_mgr_compatible,
+ .debug = amdgpu_dummy_vram_mgr_debug
+};
+
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@@ -841,17 +889,22 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
- man->func = &amdgpu_vram_mgr_func;
-
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
-
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
mgr->default_page_size = PAGE_SIZE;
+ if (!adev->gmc.is_app_apu) {
+ man->func = &amdgpu_vram_mgr_func;
+
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
+ } else {
+ man->func = &amdgpu_dummy_vram_mgr_func;
+ DRM_INFO("Setup dummy vram mgr\n");
+ }
+
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
return 0;
@@ -886,7 +939,8 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
drm_buddy_free_list(&mgr->mm, &rsv->allocated);
kfree(rsv);
}
- drm_buddy_fini(&mgr->mm);
+ if (!adev->gmc.is_app_apu)
+ drm_buddy_fini(&mgr->mm);
mutex_unlock(&mgr->lock);
ttm_resource_manager_cleanup(man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
new file mode 100644
index 000000000000..565a1fa436d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_xcp.h"
+#include "amdgpu_drv.h"
+
+#include <drm/drm_drv.h>
+#include "../amdxcp/amdgpu_xcp_drv.h"
+
+static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
+ struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
+{
+ int (*run_func)(void *handle, uint32_t inst_mask);
+ int ret = 0;
+
+ if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
+ return 0;
+
+ run_func = NULL;
+
+ switch (xcp_state) {
+ case AMDGPU_XCP_PREPARE_SUSPEND:
+ run_func = xcp_ip->ip_funcs->prepare_suspend;
+ break;
+ case AMDGPU_XCP_SUSPEND:
+ run_func = xcp_ip->ip_funcs->suspend;
+ break;
+ case AMDGPU_XCP_PREPARE_RESUME:
+ run_func = xcp_ip->ip_funcs->prepare_resume;
+ break;
+ case AMDGPU_XCP_RESUME:
+ run_func = xcp_ip->ip_funcs->resume;
+ break;
+ }
+
+ if (run_func)
+ ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);
+
+ return ret;
+}
+
+static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ int state)
+{
+ struct amdgpu_xcp_ip *xcp_ip;
+ struct amdgpu_xcp *xcp;
+ int i, ret;
+
+ if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
+ return -EINVAL;
+
+ xcp = &xcp_mgr->xcp[xcp_id];
+ for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
+ xcp_ip = &xcp->ip[i];
+ ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
+ AMDGPU_XCP_PREPARE_SUSPEND);
+}
+
+int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
+}
+
+int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
+ AMDGPU_XCP_PREPARE_RESUME);
+}
+
+int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
+}
+
+static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ struct amdgpu_xcp_ip *ip)
+{
+ struct amdgpu_xcp *xcp;
+
+ if (!ip)
+ return;
+
+ xcp = &xcp_mgr->xcp[xcp_id];
+ xcp->ip[ip->ip_id] = *ip;
+ xcp->ip[ip->ip_id].valid = true;
+
+ xcp->valid = true;
+}
+
+int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ struct amdgpu_xcp_ip ip;
+ uint8_t mem_id;
+ int i, j, ret;
+
+ if (!num_xcps || num_xcps > MAX_XCP)
+ return -EINVAL;
+
+ xcp_mgr->mode = mode;
+
+ for (i = 0; i < MAX_XCP; ++i)
+ xcp_mgr->xcp[i].valid = false;
+
+ /* This is needed for figuring out memory id of xcp */
+ xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < num_xcps; ++i) {
+ for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
+ ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
+ &ip);
+ if (ret)
+ continue;
+
+ __amdgpu_xcp_add_block(xcp_mgr, i, &ip);
+ }
+
+ xcp_mgr->xcp[i].id = i;
+
+ if (xcp_mgr->funcs->get_xcp_mem_id) {
+ ret = xcp_mgr->funcs->get_xcp_mem_id(
+ xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
+ if (ret)
+ continue;
+ else
+ xcp_mgr->xcp[i].mem_id = mem_id;
+ }
+ }
+
+ xcp_mgr->num_xcps = num_xcps;
+ amdgpu_xcp_update_partition_sched_list(adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
+{
+ int ret, curr_mode, num_xcps = 0;
+
+ if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
+ return -EINVAL;
+
+ if (xcp_mgr->mode == mode)
+ return 0;
+
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
+ return 0;
+
+ mutex_lock(&xcp_mgr->xcp_lock);
+
+ curr_mode = xcp_mgr->mode;
+ /* State set to transient mode */
+ xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;
+
+ ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);
+
+ if (ret) {
+ /* Failed, get whatever mode it's at now */
+ if (xcp_mgr->funcs->query_partition_mode)
+ xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
+ xcp_mgr, AMDGPU_XCP_FL_LOCKED);
+ else
+ xcp_mgr->mode = curr_mode;
+
+ goto out;
+ }
+
+out:
+ mutex_unlock(&xcp_mgr->xcp_lock);
+
+ return ret;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return xcp_mgr->mode;
+
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return xcp_mgr->mode;
+
+ if (!(flags & AMDGPU_XCP_FL_LOCKED))
+ mutex_lock(&xcp_mgr->xcp_lock);
+ mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
+ dev_WARN(
+ xcp_mgr->adev->dev,
+ "Cached partition mode %d not matching with device mode %d",
+ xcp_mgr->mode, mode);
+
+ if (!(flags & AMDGPU_XCP_FL_LOCKED))
+ mutex_unlock(&xcp_mgr->xcp_lock);
+
+ return mode;
+}
+
+static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
+{
+ struct drm_device *p_ddev;
+ struct drm_device *ddev;
+ int i, ret;
+
+ ddev = adev_to_drm(adev);
+
+ /* xcp #0 shares drm device setting with adev */
+ adev->xcp_mgr->xcp->ddev = ddev;
+
+ for (i = 1; i < MAX_XCP; i++) {
+ ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
+ if (ret == -ENOSPC) {
+ dev_warn(adev->dev,
+ "Skip xcp node #%d when out of drm node resource.", i);
+ return 0;
+ } else if (ret) {
+ return ret;
+ }
+
+ /* Redirect all IOCTLs to the primary device */
+ adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
+ adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
+ adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
+ adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
+ p_ddev->render->dev = ddev;
+ p_ddev->primary->dev = ddev;
+ p_ddev->vma_offset_manager = ddev->vma_offset_manager;
+ p_ddev->driver = &amdgpu_partition_driver;
+ adev->xcp_mgr->xcp[i].ddev = p_ddev;
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
+ int init_num_xcps,
+ struct amdgpu_xcp_mgr_funcs *xcp_funcs)
+{
+ struct amdgpu_xcp_mgr *xcp_mgr;
+
+ if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
+ !xcp_funcs->get_ip_details)
+ return -EINVAL;
+
+ xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
+
+ if (!xcp_mgr)
+ return -ENOMEM;
+
+ xcp_mgr->adev = adev;
+ xcp_mgr->funcs = xcp_funcs;
+ xcp_mgr->mode = init_mode;
+ mutex_init(&xcp_mgr->xcp_lock);
+
+ if (init_mode != AMDGPU_XCP_MODE_NONE)
+ amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
+
+ adev->xcp_mgr = xcp_mgr;
+
+ return amdgpu_xcp_dev_alloc(adev);
+}
+
+int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
+ enum AMDGPU_XCP_IP_BLOCK ip, int instance)
+{
+ struct amdgpu_xcp *xcp;
+ int i, id_mask = 0;
+
+ if (ip >= AMDGPU_XCP_MAX_BLOCKS)
+ return -EINVAL;
+
+ for (i = 0; i < xcp_mgr->num_xcps; ++i) {
+ xcp = &xcp_mgr->xcp[i];
+ if ((xcp->valid) && (xcp->ip[ip].valid) &&
+ (xcp->ip[ip].inst_mask & BIT(instance)))
+ id_mask |= BIT(i);
+ }
+
+ if (!id_mask)
+ id_mask = -ENXIO;
+
+ return id_mask;
+}
+
+int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
+ enum AMDGPU_XCP_IP_BLOCK ip,
+ uint32_t *inst_mask)
+{
+ if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
+ return -EINVAL;
+
+ *inst_mask = xcp->ip[ip].inst_mask;
+
+ return 0;
+}
+
+int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
+ const struct pci_device_id *ent)
+{
+ int i, ret;
+
+ if (!adev->xcp_mgr)
+ return 0;
+
+ for (i = 1; i < MAX_XCP; i++) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
+ ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
+{
+ struct drm_device *p_ddev;
+ int i;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ for (i = 1; i < MAX_XCP; i++) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
+ p_ddev = adev->xcp_mgr->xcp[i].ddev;
+ drm_dev_unplug(p_ddev);
+ p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
+ p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
+ p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
+ p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ }
+}
+
+int amdgpu_xcp_open_device(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
+ struct drm_file *file_priv)
+{
+ int i;
+
+ if (!adev->xcp_mgr)
+ return 0;
+
+ fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ for (i = 0; i < MAX_XCP; ++i) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
+ if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
+ if (adev->xcp_mgr->xcp[i].valid == FALSE) {
+ dev_err(adev->dev, "renderD%d partition %d not valid!",
+ file_priv->minor->index, i);
+ return -ENOENT;
+ }
+ dev_dbg(adev->dev, "renderD%d partition %d opened!",
+ file_priv->minor->index, i);
+ fpriv->xcp_id = i;
+ break;
+ }
+ }
+
+ fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
+ adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
+ return 0;
+}
+
+void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
+ struct amdgpu_ctx_entity *entity)
+{
+ struct drm_gpu_scheduler *sched;
+ struct amdgpu_ring *ring;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ sched = entity->entity.rq->sched;
+ if (sched->ready) {
+ ring = to_amdgpu_ring(entity->entity.rq->sched);
+ atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
new file mode 100644
index 000000000000..9a1036aeec2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_XCP_H
+#define AMDGPU_XCP_H
+
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "amdgpu_ctx.h"
+
+#define MAX_XCP 8
+
+#define AMDGPU_XCP_MODE_NONE -1
+#define AMDGPU_XCP_MODE_TRANS -2
+
+#define AMDGPU_XCP_FL_NONE 0
+#define AMDGPU_XCP_FL_LOCKED (1 << 0)
+
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
+struct amdgpu_fpriv;
+
+enum AMDGPU_XCP_IP_BLOCK {
+ AMDGPU_XCP_GFXHUB,
+ AMDGPU_XCP_GFX,
+ AMDGPU_XCP_SDMA,
+ AMDGPU_XCP_VCN,
+ AMDGPU_XCP_MAX_BLOCKS
+};
+
+enum AMDGPU_XCP_STATE {
+ AMDGPU_XCP_PREPARE_SUSPEND,
+ AMDGPU_XCP_SUSPEND,
+ AMDGPU_XCP_PREPARE_RESUME,
+ AMDGPU_XCP_RESUME,
+};
+
+struct amdgpu_xcp_ip_funcs {
+ int (*prepare_suspend)(void *handle, uint32_t inst_mask);
+ int (*suspend)(void *handle, uint32_t inst_mask);
+ int (*prepare_resume)(void *handle, uint32_t inst_mask);
+ int (*resume)(void *handle, uint32_t inst_mask);
+};
+
+struct amdgpu_xcp_ip {
+ struct amdgpu_xcp_ip_funcs *ip_funcs;
+ uint32_t inst_mask;
+
+ enum AMDGPU_XCP_IP_BLOCK ip_id;
+ bool valid;
+};
+
+struct amdgpu_xcp {
+ struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS];
+
+ uint8_t id;
+ uint8_t mem_id;
+ bool valid;
+ atomic_t ref_cnt;
+ struct drm_device *ddev;
+ struct drm_device *rdev;
+ struct drm_device *pdev;
+ struct drm_driver *driver;
+ struct drm_vma_offset_manager *vma_offset_manager;
+ struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
+};
+
+struct amdgpu_xcp_mgr {
+ struct amdgpu_device *adev;
+ struct mutex xcp_lock;
+ struct amdgpu_xcp_mgr_funcs *funcs;
+
+ struct amdgpu_xcp xcp[MAX_XCP];
+ uint8_t num_xcps;
+ int8_t mode;
+
+ /* Used to determine KFD memory size limits per XCP */
+ unsigned int num_xcp_per_mem_partition;
+};
+
+struct amdgpu_xcp_mgr_funcs {
+ int (*switch_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr, int mode,
+ int *num_xcps);
+ int (*query_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr);
+ int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ enum AMDGPU_XCP_IP_BLOCK ip_id,
+ struct amdgpu_xcp_ip *ip);
+ int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
+ struct amdgpu_xcp *xcp, uint8_t *mem_id);
+
+ int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*select_scheds)(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
+ int (*update_partition_sched_list)(struct amdgpu_device *adev);
+};
+
+int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+
+int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
+ int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs);
+int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode);
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode);
+int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
+ enum AMDGPU_XCP_IP_BLOCK ip, int instance);
+
+int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
+ enum AMDGPU_XCP_IP_BLOCK ip,
+ uint32_t *inst_mask);
+
+int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
+ const struct pci_device_id *ent);
+void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev);
+int amdgpu_xcp_open_device(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
+ struct drm_file *file_priv);
+void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
+ struct amdgpu_ctx_entity *entity);
+
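+/* Both macros fall back gracefully when no partition manager is present:
+ * select_scheds evaluates to -ENOENT and update_partition_sched_list to 0.
+ */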
+#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
+ ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
+ (adev)->xcp_mgr->funcs->select_scheds ? \
+ (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
+#define amdgpu_xcp_update_partition_sched_list(adev) \
+ ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
+ (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
+ (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
+
+static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ if (!xcp_mgr)
+ return 1;
+ else
+ return xcp_mgr->num_xcps;
+}
+
+static inline struct amdgpu_xcp *
+amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
+{
+ if (!xcp_mgr)
+ return NULL;
+
+ while (*from < MAX_XCP) {
+ if (xcp_mgr->xcp[*from].valid)
+ return &xcp_mgr->xcp[*from];
+ ++(*from);
+ }
+
+ return NULL;
+}
+
+#define for_each_xcp(xcp_mgr, xcp, i) \
+ for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
+ xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 439925477fb8..7e91b24784e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -325,6 +325,36 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
}
+static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0; i < top->num_nodes; i++)
+ sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_hops);
+
+ return sysfs_emit(buf, "%s\n", buf);
+}
+
+static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0; i < top->num_nodes; i++)
+ sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_links);
+
+ return sysfs_emit(buf, "%s\n", buf);
+}
+
#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
struct device_attribute *attr,
@@ -361,6 +391,8 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
+static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
+static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
@@ -380,6 +412,15 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
if (ret)
pr_err("failed to create xgmi_error\n");
+ /* Create xgmi num hops file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
+ if (ret)
+ pr_err("failed to create xgmi_num_hops\n");
+
+ /* Create xgmi num links file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (ret)
+ pr_err("failed to create xgmi_num_links\n");
/* Create sysfs link to hive info folder on the first device */
if (hive->kobj.parent != (&adev->dev->kobj)) {
@@ -407,6 +448,9 @@ remove_link:
remove_file:
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
success:
return ret;
@@ -420,6 +464,8 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
device_remove_file(adev->dev, &dev_attr_xgmi_error);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
if (hive->kobj.parent != (&adev->dev->kobj))
sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
@@ -454,6 +500,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
hive = kzalloc(sizeof(*hive), GFP_KERNEL);
if (!hive) {
dev_err(adev->dev, "XGMI: allocation failed\n");
+ ret = -ENOMEM;
hive = NULL;
goto pro_end;
}
@@ -1014,7 +1061,8 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
}
/* Trigger XGMI/WAFL error */
-static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if)
+static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ void *inject_if, uint32_t instance_mask)
{
int ret = 0;
struct ta_ras_trigger_error_input *block_info =
@@ -1026,7 +1074,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *injec
if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
dev_warn(adev->dev, "Failed to disallow XGMI power down");
- ret = psp_ras_trigger_error(&adev->psp, block_info);
+ ret = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);
if (amdgpu_ras_intr_triggered())
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 24d42d24e6a0..104a5ad8397d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -70,7 +70,6 @@ enum amd_sriov_ucode_engine_id {
AMD_SRIOV_UCODE_ID_RLC_SRLS,
AMD_SRIOV_UCODE_ID_MEC,
AMD_SRIOV_UCODE_ID_MEC2,
- AMD_SRIOV_UCODE_ID_IMU,
AMD_SRIOV_UCODE_ID_SOS,
AMD_SRIOV_UCODE_ID_ASD,
AMD_SRIOV_UCODE_ID_TA_RAS,
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
new file mode 100644
index 000000000000..d0fc62784e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "amdgpu_xcp.h"
+#include "gfx_v9_4_3.h"
+#include "gfxhub_v1_2.h"
+#include "sdma_v4_4_2.h"
+
+#define XCP_INST_MASK(num_inst, xcp_id) \
+ (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
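+/* e.g. with 2 instances per partition, XCP_INST_MASK(2, 1) = GENMASK(1, 0) << 2 = 0xc,
+ * i.e. instances 2 and 3 belong to partition 1.
+ */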
+
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
+void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
+
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
+
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
+ adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
+
+ adev->doorbell_index.sdma_doorbell_range = 20;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->doorbell_index.sdma_engine[i] =
+ AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
+ i * (adev->doorbell_index.sdma_doorbell_range >> 1);
+
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
+ adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
+
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
+}
+
+static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx, struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
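+		/* Each VCN instance is shared by two partitions under CPX mode; the
+		 * ring points at the even partition of the pair here, the odd one is
+		 * added in the scheduler list update.
+		 */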
+ if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ inst_mask = 1 << (inst_idx * 2);
+ break;
+ default:
+		DRM_ERROR("Unsupported ring type %d!", ring->funcs->type);
+ return;
+ }
+
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void aqua_vanjaram_xcp_gpu_sched_update(
+ struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
+ sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int aqua_vanjaram_xcp_sched_list_update(
+ struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN is shared by two partitions under CPX MODE */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+ aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return aqua_vanjaram_xcp_sched_list_update(adev);
+}
+
+static int aqua_vanjaram_select_scheds(
+ struct amdgpu_device *adev,
+ u32 hw_ip,
+ u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ DRM_DEBUG("Selected partition #%d", sel_xcp_id);
+ } else {
+ DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+	/* VCN_HWIP covers both JPEG and VCN, since JPEG is only an alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+		/* For the rest of the IPs, no lookup is required.
+		 * Assume 'logical instance == physical instance' for all configs. */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
+}
+
+/* Fixed pattern for smn addressing on different AIDs:
+ * bit[34]: indicates cross-AID access
+ * bit[33:32]: indicates the target AID id
+ * AID id range is 0 ~ 3, as the maximum number of AIDs is 4.
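+ * e.g. ext_id 2 encodes to 0x600000000 (bit 34 set, AID id 2 in bits [33:32]).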
+ */
+u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
+{
+ u64 ext_offset;
+
+ /* local routing and bit[34:32] will be zeros */
+ if (ext_id == 0)
+ return 0;
+
+	/* Initiated from the host, accesses to all non-zero AIDs are cross-AID traffic */
+ ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
+
+ return ext_offset;
+}
+
+static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ if (adev->nbio.funcs->get_compute_partition_mode)
+ mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+
+ return mode;
+}
+
+static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
+{
+ int num_xcc, num_xcc_per_xcp = 0;
+
+ num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc;
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc / 2;
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc / 3;
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc / 4;
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_xcc_per_xcp = 1;
+ break;
+ }
+
+ return num_xcc_per_xcp;
+}
+
+static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ enum AMDGPU_XCP_IP_BLOCK ip_id,
+ struct amdgpu_xcp_ip *ip)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
+ int num_sdma, num_vcn;
+
+ num_sdma = adev->sdma.num_instances;
+ num_vcn = adev->vcn.num_vcn_inst;
+
+ switch (xcp_mgr->mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ num_sdma_xcp = num_sdma;
+ num_vcn_xcp = num_vcn;
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ num_sdma_xcp = num_sdma / 2;
+ num_vcn_xcp = num_vcn / 2;
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ num_sdma_xcp = num_sdma / 3;
+ num_vcn_xcp = num_vcn / 3;
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_sdma_xcp = num_sdma / 4;
+ num_vcn_xcp = num_vcn / 4;
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_sdma_xcp = 2;
+ num_vcn_xcp = num_vcn ? 1 : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+
+ switch (ip_id) {
+ case AMDGPU_XCP_GFXHUB:
+ ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
+ ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
+ break;
+ case AMDGPU_XCP_GFX:
+ ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
+ ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
+ break;
+ case AMDGPU_XCP_SDMA:
+ ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
+ ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
+ break;
+ case AMDGPU_XCP_VCN:
+ ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id);
+ /* TODO : Assign IP funcs */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ip->ip_id = ip_id;
+
+ return 0;
+}
+
+static enum amdgpu_gfx_partition
+__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_xcc;
+
+ num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+
+ if (adev->gmc.num_mem_partitions == 1)
+ return AMDGPU_SPX_PARTITION_MODE;
+
+ if (adev->gmc.num_mem_partitions == num_xcc)
+ return AMDGPU_CPX_PARTITION_MODE;
+
+ if (adev->gmc.num_mem_partitions == num_xcc / 2)
+ return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
+ AMDGPU_QPX_PARTITION_MODE;
+
+ if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
+ return AMDGPU_DPX_PARTITION_MODE;
+
+ return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
+static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+ enum amdgpu_gfx_partition mode)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_xcc, num_xccs_per_xcp;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+ case AMDGPU_DPX_PARTITION_MODE:
+ return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
+ case AMDGPU_TPX_PARTITION_MODE:
+ return (adev->gmc.num_mem_partitions == 1 ||
+ adev->gmc.num_mem_partitions == 3) &&
+ ((num_xcc % 3) == 0);
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xccs_per_xcp = num_xcc / 4;
+ return (adev->gmc.num_mem_partitions == 1 ||
+ adev->gmc.num_mem_partitions == 4) &&
+ (num_xccs_per_xcp >= 2);
+ case AMDGPU_CPX_PARTITION_MODE:
+ return ((num_xcc > 1) &&
+ (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 4) &&
+ (num_xcc % adev->gmc.num_mem_partitions) == 0);
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode, int *num_xcps)
+{
+ int num_xcc_per_xcp, num_xcc, ret;
+ struct amdgpu_device *adev;
+ u32 flags = 0;
+
+ adev = xcp_mgr->adev;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
+ mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
+ } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
+ dev_err(adev->dev,
+ "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
+ amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
+ return -EINVAL;
+ }
+
+ if (adev->kfd.init_complete)
+ flags |= AMDGPU_XCP_OPS_KFD;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
+ if (ret)
+ goto out;
+ }
+
+ ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+ if (ret)
+ goto unlock;
+
+ num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
+ if (adev->gfx.funcs->switch_partition_mode)
+ adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
+ num_xcc_per_xcp);
+
+ /* Init info about new xcps */
+ *num_xcps = num_xcc / num_xcc_per_xcp;
+ amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
+
+ ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+unlock:
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_unlock_kfd(adev);
+out:
+ return ret;
+}
+
+static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
+ int xcc_id, uint8_t *mem_id)
+{
+ /* memory/spatial modes validation check is already done */
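+	/* e.g. with 8 XCCs in QPX mode (2 XCCs per XCP, 4 XCPs) and 2 memory
+	 * partitions: xcc_id 5 -> XCP 2 -> mem_id 1
+	 */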
+ *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
+ *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
+
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ struct amdgpu_xcp *xcp, uint8_t *mem_id)
+{
+ struct amdgpu_numa_info numa_info;
+ struct amdgpu_device *adev;
+ uint32_t xcc_mask;
+ int r, i, xcc_id;
+
+ adev = xcp_mgr->adev;
+ /* TODO: BIOS is not returning the right info now
+ * Check on this later
+ */
+ /*
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ */
+ if (adev->gmc.num_mem_partitions == 1) {
+ /* Only one range */
+ *mem_id = 0;
+ return 0;
+ }
+
+ r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
+ if (r || !xcc_mask)
+ return -EINVAL;
+
+ xcc_id = ffs(xcc_mask) - 1;
+ if (!adev->gmc.is_app_apu)
+ return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
+
+ r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+
+ if (r)
+ return r;
+
+ r = -EINVAL;
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
+ *mem_id = i;
+ r = 0;
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ enum AMDGPU_XCP_IP_BLOCK ip_id,
+ struct amdgpu_xcp_ip *ip)
+{
+ if (!ip)
+ return -EINVAL;
+
+ return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
+}
+
+struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
+ .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
+ .query_partition_mode = &aqua_vanjaram_query_partition_mode,
+ .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
+ .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
+ .select_scheds = &aqua_vanjaram_select_scheds,
+ .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
+};
+
+static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
+ &aqua_vanjaram_xcp_funcs);
+ if (ret)
+ return ret;
+
+ /* TODO: Default memory node affinity init */
+
+ return ret;
+}
+
+int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
+{
+ u32 mask, inst_mask = adev->sdma.sdma_mask;
+ int ret, i;
+
+ /* generally 1 AID supports 4 instances */
+ adev->sdma.num_inst_per_aid = 4;
+ adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);
+
+ adev->aid_mask = i = 1;
+ inst_mask >>= adev->sdma.num_inst_per_aid;
+
+ for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
+ inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
+ if ((inst_mask & mask) == mask)
+ adev->aid_mask |= (1 << i);
+ }
+
+ /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
+ * addressed based on logical instance ids.
+ */
+ adev->vcn.harvest_config = 0;
+ adev->vcn.num_inst_per_aid = 1;
+ adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
+ adev->jpeg.harvest_config = 0;
+ adev->jpeg.num_inst_per_aid = 1;
+ adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);
+
+ ret = aqua_vanjaram_xcp_mgr_init(adev);
+ if (ret)
+ return ret;
+
+ aqua_vanjaram_ip_map_init(adev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 1c5d9388ad0b..9f63ddb89b75 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1438,6 +1438,8 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
ctx->vbios_pn[count] = 0;
}
+
+ pr_info("ATOM BIOS: %s\n", ctx->vbios_pn);
}
static void atom_get_vbios_version(struct atom_context *ctx)
@@ -1460,11 +1462,9 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
- char *str;
struct _ATOM_ROM_HEADER *atom_rom_header;
struct _ATOM_MASTER_DATA_TABLE *master_table;
struct _ATOM_FIRMWARE_INFO *atom_fw_info;
- u16 idx;
if (!ctx)
return NULL;
@@ -1502,16 +1502,6 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
return NULL;
}
- idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
- if (idx == 0)
- idx = 0x80;
-
- str = CSTR(idx);
- if (*str != '\0') {
- pr_info("ATOM BIOS: %s\n", str);
- strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
- }
-
atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
if (atom_rom_header->usMasterDataTableOffset != 0) {
master_table = (struct _ATOM_MASTER_DATA_TABLE *)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index 0c1839824520..c11cf18a0f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -33,7 +33,6 @@ struct drm_device;
#define ATOM_ATI_MAGIC_PTR 0x30
#define ATOM_ATI_MAGIC " 761295520"
#define ATOM_ROM_TABLE_PTR 0x48
-#define ATOM_ROM_PART_NUMBER_PTR 0x6E
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
@@ -118,12 +117,15 @@ struct drm_device;
struct card_info {
struct drm_device *dev;
- void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (*reg_write)(struct card_info *info,
+ u32 reg, uint32_t val); /* filled by driver */
+ uint32_t (*reg_read)(struct card_info *info, uint32_t reg); /* filled by driver */
+ void (*mc_write)(struct card_info *info,
+ u32 reg, uint32_t val); /* filled by driver */
+ uint32_t (*mc_read)(struct card_info *info, uint32_t reg); /* filled by driver */
+ void (*pll_write)(struct card_info *info,
+ u32 reg, uint32_t val); /* filled by driver */
+ uint32_t (*pll_read)(struct card_info *info, uint32_t reg); /* filled by driver */
};
struct atom_context {
@@ -143,7 +145,6 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
- char vbios_version[20];
uint8_t name[STRLEN_LONG];
uint8_t vbios_pn[STRLEN_LONG];
@@ -154,10 +155,10 @@ struct atom_context {
extern int amdgpu_atom_debug;
-struct atom_context *amdgpu_atom_parse(struct card_info *, void *);
-int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *);
-int amdgpu_atom_asic_init(struct atom_context *);
-void amdgpu_atom_destroy(struct atom_context *);
+struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+int amdgpu_atom_asic_init(struct atom_context *ctx);
+void amdgpu_atom_destroy(struct atom_context *ctx);
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
uint8_t *frev, uint8_t *crev, uint16_t *data_start);
bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index de6d10390ab2..5641cf05d856 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1141,12 +1141,12 @@ static uint32_t cik_get_register_value(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index df385ffc9768..6f7c031dd197 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -442,8 +442,7 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &cik_ih_funcs;
}
-const struct amdgpu_ip_block_version cik_ih_ip_block =
-{
+const struct amdgpu_ip_block_version cik_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 2,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 67d16236b216..52598fbc9b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -489,8 +489,6 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
- ring->sched.ready = true;
}
cik_sdma_enable(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9a24ed463abd..584cd5277f92 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -52,8 +52,7 @@
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 crtc_offsets[] =
-{
+static const u32 crtc_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
CRTC2_REGISTER_OFFSET,
@@ -63,8 +62,7 @@ static const u32 crtc_offsets[] =
CRTC6_REGISTER_OFFSET
};
-static const u32 hpd_offsets[] =
-{
+static const u32 hpd_offsets[] = {
HPD0_REGISTER_OFFSET,
HPD1_REGISTER_OFFSET,
HPD2_REGISTER_OFFSET,
@@ -121,30 +119,26 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const u32 golden_settings_tonga_a11[] =
-{
+static const u32 golden_settings_tonga_a11[] = {
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_MISC, 0x1f311fff, 0x12300000,
mmHDMI_CONTROL, 0x31000111, 0x00000011,
};
-static const u32 tonga_mgcg_cgcg_init[] =
-{
+static const u32 tonga_mgcg_cgcg_init[] = {
mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
-static const u32 golden_settings_fiji_a10[] =
-{
+static const u32 golden_settings_fiji_a10[] = {
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_MISC, 0x1f311fff, 0x12300000,
mmHDMI_CONTROL, 0x31000111, 0x00000011,
};
-static const u32 fiji_mgcg_cgcg_init[] =
-{
+static const u32 fiji_mgcg_cgcg_init[] = {
mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
@@ -1425,8 +1419,7 @@ static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
-static const u32 pin_offsets[] =
-{
+static const u32 pin_offsets[] = {
AUD0_REGISTER_OFFSET,
AUD1_REGISTER_OFFSET,
AUD2_REGISTER_OFFSET,
@@ -1811,8 +1804,7 @@ static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
}
}
-static const u32 vga_control_regs[6] =
-{
+static const u32 vga_control_regs[6] = {
mmD1VGA_CONTROL,
mmD2VGA_CONTROL,
mmD3VGA_CONTROL,
@@ -3651,8 +3643,7 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
-const struct amdgpu_ip_block_version dce_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 10,
.minor = 0,
@@ -3660,8 +3651,7 @@ const struct amdgpu_ip_block_version dce_v10_0_ip_block =
.funcs = &dce_v10_0_ip_funcs,
};
-const struct amdgpu_ip_block_version dce_v10_1_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v10_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 10,
.minor = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index d421a268c9ff..f2b3cb5ed6be 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -53,8 +53,7 @@
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 crtc_offsets[6] =
-{
+static const u32 crtc_offsets[6] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
CRTC2_REGISTER_OFFSET,
@@ -63,8 +62,7 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
-static const u32 hpd_offsets[] =
-{
+static const u32 hpd_offsets[] = {
HPD0_REGISTER_OFFSET,
HPD1_REGISTER_OFFSET,
HPD2_REGISTER_OFFSET,
@@ -1345,9 +1343,9 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->channels > max_channels) {
value = (sad->channels <<
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
- (sad->byte2 <<
+ (sad->byte2 <<
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
- (sad->freq <<
+ (sad->freq <<
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1379,8 +1377,7 @@ static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
-static const u32 pin_offsets[7] =
-{
+static const u32 pin_offsets[7] = {
(0x1780 - 0x1780),
(0x1786 - 0x1780),
(0x178c - 0x1780),
@@ -1740,8 +1737,7 @@ static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
}
}
-static const u32 vga_control_regs[6] =
-{
+static const u32 vga_control_regs[6] = {
mmD1VGA_CONTROL,
mmD2VGA_CONTROL,
mmD3VGA_CONTROL,
@@ -1895,9 +1891,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
- (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
- (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
@@ -3151,7 +3147,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
@@ -3544,8 +3540,7 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}
-const struct amdgpu_ip_block_version dce_v8_0_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v8_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 8,
.minor = 0,
@@ -3553,8 +3548,7 @@ const struct amdgpu_ip_block_version dce_v8_0_ip_block =
.funcs = &dce_v8_0_ip_funcs,
};
-const struct amdgpu_ip_block_version dce_v8_1_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v8_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 8,
.minor = 1,
@@ -3562,8 +3556,7 @@ const struct amdgpu_ip_block_version dce_v8_1_ip_block =
.funcs = &dce_v8_0_ip_funcs,
};
-const struct amdgpu_ip_block_version dce_v8_2_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v8_2_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 8,
.minor = 2,
@@ -3571,8 +3564,7 @@ const struct amdgpu_ip_block_version dce_v8_2_ip_block =
.funcs = &dce_v8_0_ip_funcs,
};
-const struct amdgpu_ip_block_version dce_v8_3_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v8_3_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 8,
.minor = 3,
@@ -3580,8 +3572,7 @@ const struct amdgpu_ip_block_version dce_v8_3_ip_block =
.funcs = &dce_v8_0_ip_funcs,
};
-const struct amdgpu_ip_block_version dce_v8_5_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v8_5_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 8,
.minor = 5,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f5b5ce1051a2..0aee9c8288a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -271,8 +271,7 @@ MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_10_3_7_mec2.bin");
MODULE_FIRMWARE("amdgpu/gc_10_3_7_rlc.bin");
-static const struct soc15_reg_golden golden_settings_gc_10_1[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
@@ -315,13 +314,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = {
/* Pending on emulation bring up */
};
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
@@ -1376,8 +1373,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
@@ -1418,8 +1414,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};
-static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
@@ -1464,13 +1459,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = {
/* Pending on emulation bring up */
};
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
@@ -2093,13 +2086,11 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = {
/* Pending on emulation bring up */
};
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
@@ -3154,8 +3145,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_3[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
@@ -3164,7 +3154,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
- SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid, 0xff000000, 0xff008080),
@@ -3201,13 +3191,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] = {
/* Pending on emulation bring up */
};
-static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
@@ -3254,8 +3242,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020),
};
-static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
@@ -3285,8 +3272,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020),
};
-static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
@@ -3309,8 +3295,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0x30000000, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0x7e000000, 0x7e000100),
@@ -3380,7 +3365,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
};
@@ -3421,8 +3406,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_cyan_skillfish[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
};
-static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
-{
+static const struct soc15_reg_golden golden_settings_gc_10_3_6[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
@@ -3490,7 +3474,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance);
+ u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
@@ -3506,6 +3490,8 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub, uint8_t dst_sel);
+static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+ unsigned int vmid);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -3568,7 +3554,7 @@ static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
- if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
+ if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
return;
}
@@ -3636,7 +3622,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
+ adev->gfx.kiq[0].pmf = &gfx_v10_0_kiq_pm4_funcs;
}
static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
@@ -3714,8 +3700,8 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
break;
case IP_VERSION(10, 3, 4):
soc15_program_register_sequence(adev,
- golden_settings_gc_10_3_4,
- (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
+ golden_settings_gc_10_3_4,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
break;
case IP_VERSION(10, 3, 5):
soc15_program_register_sequence(adev,
@@ -3782,7 +3768,7 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
uint32_t tmp = 0;
- unsigned i;
+ unsigned int i;
int r;
WREG32(scratch, 0xCAFEDEAD);
@@ -3820,7 +3806,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- unsigned index;
+ unsigned int index;
uint64_t gpu_addr;
volatile uint32_t *cpu_ptr;
long r;
@@ -3951,7 +3937,7 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
break;
}
- return ret ;
+ return ret;
}
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
@@ -4151,7 +4137,7 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
- reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
@@ -4159,14 +4145,14 @@ static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 3, 0):
- reg_access_ctrl->spare_int =
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);
- break;
- default:
- reg_access_ctrl->spare_int =
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
- break;
+ case IP_VERSION(10, 3, 0):
+ reg_access_ctrl->spare_int =
+ SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);
+ break;
+ default:
+ reg_access_ctrl->spare_int =
+ SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
+ break;
}
adev->gfx.rlc.rlcg_reg_access_supported = true;
}
@@ -4187,11 +4173,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- /* init spm vmid with 0xf */
- if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
-
-
return 0;
}
@@ -4213,13 +4194,13 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
int r;
u32 *hpd;
const __le32 *fw_data = NULL;
- unsigned fw_size;
+ unsigned int fw_size;
u32 *fw = NULL;
size_t mec_hpd_size;
const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -4291,11 +4272,12 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
-static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* in gfx10 the SIMD_ID is specified as part of the INSTANCE
* field when performing a select_se_sh so it should be
- * zero here */
+ * zero here
+ */
WARN_ON(simd != 0);
/* type 2 wave data */
@@ -4318,7 +4300,7 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}
-static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
@@ -4329,7 +4311,7 @@ static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
dst);
}
-static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t start, uint32_t size,
uint32_t *dst)
@@ -4340,7 +4322,7 @@ static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
nv_grbm_select(adev, me, pipe, q, vm);
}
@@ -4461,7 +4443,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
else
ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
@@ -4474,7 +4456,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
- unsigned irq_type;
+ unsigned int irq_type;
struct amdgpu_ring *ring;
unsigned int hw_prio;
@@ -4490,7 +4472,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX10_MEC_HPD_SIZE);
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -4550,7 +4532,7 @@ static int gfx_v10_0_sw_init(void *handle)
/* KIQ event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
- &adev->gfx.kiq.irq);
+ &adev->gfx.kiq[0].irq);
if (r)
return r;
@@ -4614,8 +4596,8 @@ static int gfx_v10_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
- j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v10_0_compute_ring_init(adev, ring_id,
@@ -4629,19 +4611,19 @@ static int gfx_v10_0_sw_init(void *handle)
}
if (!adev->enable_mes_kiq) {
- r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
}
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd), 0);
if (r)
return r;
@@ -4690,11 +4672,11 @@ static int gfx_v10_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
if (!adev->enable_mes_kiq) {
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
}
gfx_v10_0_pfp_fini(adev);
@@ -4712,7 +4694,7 @@ static int gfx_v10_0_sw_fini(void *handle)
}
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -4772,13 +4754,13 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
(adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v10_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -4795,7 +4777,8 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
uint32_t pa_sc_tile_steering_override;
/* for ASICs that integrates GFX v10.3
- * pa_sc_tile_steering_override should be set to 0 */
+ * pa_sc_tile_steering_override should be set to 0
+ */
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
return 0;
@@ -4825,6 +4808,29 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
#define DEFAULT_SH_MEM_BASES (0x6000)
+static void gfx_v10_0_debug_trap_config_init(struct amdgpu_device *adev,
+ uint32_t first_vmid,
+ uint32_t last_vmid)
+{
+ uint32_t data;
+ uint32_t trap_config_vmid_mask = 0;
+ int i;
+
+ /* Calculate trap config vmid mask */
+ for (i = first_vmid; i < last_vmid; i++)
+ trap_config_vmid_mask |= (1 << i);
+
+ data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
+ VMID_SEL, trap_config_vmid_mask);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
+ TRAP_EN, 1);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
+}
+
static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
@@ -4848,14 +4854,19 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* Initialize all compute VMIDs to have no GDS, GWS, or OA
- access. These should be enabled by FW for target VMIDs. */
+ /*
+ * Initialize all compute VMIDs to have no GDS, GWS, or OA
+ * access. These should be enabled by FW for target VMIDs.
+ */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
}
+
+ gfx_v10_0_debug_trap_config_init(adev, adev->vm_manager.first_kfd_vmid,
+ AMDGPU_NUM_VMID);
}
static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
@@ -4907,7 +4918,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);
wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
/*
* Set corresponding TCP bits for the inactive WGPs in
@@ -4940,7 +4951,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
}
}
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4978,7 +4989,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
nv_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
@@ -5082,8 +5093,10 @@ static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
{
- /* TODO: enable rlc & smu handshake until smu
- * and gfxoff feature works as expected */
+ /*
+ * TODO: enable rlc & smu handshake until smu
+ * and gfxoff feature works as expected
+ */
if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
@@ -5106,7 +5119,7 @@ static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_0 *hdr;
const __le32 *fw_data;
- unsigned i, fw_size;
+ unsigned int i, fw_size;
if (!adev->gfx.rlc_fw)
return -EINVAL;
@@ -5143,6 +5156,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
gfx_v10_0_init_csb(adev);
+ gfx_v10_0_update_spm_vmid_internal(adev, 0xf);
+
if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
} else {
@@ -5173,6 +5188,8 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
gfx_v10_0_init_csb(adev);
+ gfx_v10_0_update_spm_vmid_internal(adev, 0xf);
+
adev->gfx.rlc.funcs->start(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -5181,6 +5198,7 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
}
+
return 0;
}
@@ -5648,11 +5666,10 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
- } else {
+ else
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- }
if (adev->job_hang && !enable)
return 0;
@@ -5674,7 +5691,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
int r;
const struct gfx_firmware_header_v1_0 *pfp_hdr;
const __le32 *fw_data;
- unsigned i, fw_size;
+ unsigned int i, fw_size;
uint32_t tmp;
uint32_t usec_timeout = 50000; /* wait for 50ms */
@@ -5752,7 +5769,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
int r;
const struct gfx_firmware_header_v1_0 *ce_hdr;
const __le32 *fw_data;
- unsigned i, fw_size;
+ unsigned int i, fw_size;
uint32_t tmp;
uint32_t usec_timeout = 50000; /* wait for 50ms */
@@ -5829,7 +5846,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
int r;
const struct gfx_firmware_header_v1_0 *me_hdr;
const __le32 *fw_data;
- unsigned i, fw_size;
+ unsigned int i, fw_size;
uint32_t tmp;
uint32_t usec_timeout = 50000; /* wait for 50ms */
@@ -6073,7 +6090,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- u32 i;
/* Set the write pointer delay */
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
@@ -6168,11 +6184,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v10_0_cp_gfx_start(adev);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-
return 0;
}
@@ -6214,7 +6225,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
break;
}
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@@ -6223,7 +6234,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
const struct gfx_firmware_header_v1_0 *mec_hdr;
const __le32 *fw_data;
- unsigned i;
+ unsigned int i;
u32 tmp;
u32 usec_timeout = 50000; /* Wait for 50 ms */
@@ -6423,55 +6434,6 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-#ifdef BRING_UP_DEBUG
-static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct v10_gfx_mqd *mqd = ring->mqd_ptr;
-
- /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
-
- /* set GFX_MQD_BASE */
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
-
- /* set GFX_MQD_CONTROL */
- WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
-
- /* set GFX_HQD_VMID to 0 */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
-
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
- mqd->cp_gfx_hqd_queue_priority);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
-
- /* set GFX_HQD_BASE, similar as CP_RB_BASE */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
-
- /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
-
- /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
-
- /* set RB_WPTR_POLL_ADDR */
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
-
- /* set RB_DOORBELL_CONTROL */
- WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
-
- /* active the queue */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
-
- return 0;
-}
-#endif
-
static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -6492,59 +6454,23 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1)
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
-#ifdef BRING_UP_DEBUG
- gfx_v10_0_gfx_queue_init_register(ring);
-#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) {
- /* reset mqd with the backup copy */
+ } else {
+ /* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
amdgpu_ring_clear_ring(ring);
-#ifdef BRING_UP_DEBUG
- mutex_lock(&adev->srbm_mutex);
- nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- gfx_v10_0_gfx_queue_init_register(ring);
- nv_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-#endif
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- int r, i;
-
- if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
- return -EINVAL;
-
- r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
- adev->gfx.num_gfx_rings);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
-
- return amdgpu_ring_test_helper(kiq_ring);
-}
-#endif
-
static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
@@ -6555,7 +6481,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
- goto done;
+ return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
@@ -6565,23 +6491,14 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
}
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
- goto done;
+ return r;
}
-#ifndef BRING_UP_DEBUG
- r = gfx_v10_0_kiq_enable_kgq(adev);
- if (r)
- goto done;
-#endif
- r = gfx_v10_0_cp_gfx_start(adev);
+
+ r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
- goto done;
+ return r;
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-done:
- return r;
+ return gfx_v10_0_cp_gfx_start(adev);
}
static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
@@ -6812,14 +6729,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_compute_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v10_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
@@ -6841,8 +6757,8 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}
return 0;
@@ -6864,17 +6780,14 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -6885,21 +6798,22 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v10_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
return 0;
}
@@ -6927,7 +6841,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = amdgpu_gfx_enable_kcq(adev);
+ r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
@@ -6999,8 +6913,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
{
uint32_t data, pattern = 0xDEADBEEF;
- /* check if mmVGT_ESGS_RING_SIZE_UMD
- * has been remapped to mmVGT_ESGS_RING_SIZE */
+ /*
+ * check if mmVGT_ESGS_RING_SIZE_UMD
+ * has been remapped to mmVGT_ESGS_RING_SIZE
+ */
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
@@ -7011,12 +6927,10 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) == pattern) {
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD , data);
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
return true;
- } else {
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data);
- return false;
}
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data);
break;
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
@@ -7031,12 +6945,12 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
return true;
- } else {
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
- return false;
}
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
break;
}
+
+ return false;
}
static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
@@ -7046,8 +6960,10 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return;
- /* initialize cam_index to 0
- * index will auto-inc after each data writting */
+ /*
+ * Initialize cam_index to 0
+ * index will auto-inc after each data writing
+ */
WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
switch (adev->ip_versions[GC_HWIP][0]) {
@@ -7177,6 +7093,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev)
{
uint32_t data;
+
data = RREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG);
data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
WREG32_SOC15(GC, 0, mmCPC_PSP_DEBUG, data);
@@ -7240,47 +7157,20 @@ static int gfx_v10_0_hw_init(void *handle)
return r;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_gfx_rings))
- return -ENOMEM;
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
- PREEMPT_QUEUES, 0, 0);
- if (!adev->job_hang)
- return amdgpu_ring_test_helper(kiq_ring);
- else
- return 0;
-}
-#endif
-
static int gfx_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
if (!adev->no_hw_access) {
-#ifndef BRING_UP_DEBUG
if (amdgpu_async_gfx_ring) {
- r = gfx_v10_0_kiq_disable_kgq(adev);
- if (r)
+ if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
-#endif
- if (amdgpu_gfx_disable_kcq(adev))
+
+ if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
}
@@ -7320,7 +7210,7 @@ static bool gfx_v10_0_is_idle(void *handle)
static int gfx_v10_0_wait_for_idle(void *handle)
{
- unsigned i;
+ unsigned int i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -7572,10 +7462,10 @@ static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}
-static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
- unsigned i;
+ unsigned int i;
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
@@ -7613,7 +7503,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
@@ -7960,7 +7850,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d
static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
/* enable FGCG firstly*/
@@ -7999,17 +7889,16 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
-static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+ unsigned int vmid)
{
u32 reg, data;
- amdgpu_gfx_off_ctrl(adev, false);
-
/* not for *_SOC15 */
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
@@ -8024,6 +7913,13 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+{
+ amdgpu_gfx_off_ctrl(adev, false);
+
+ gfx_v10_0_update_spm_vmid_internal(adev, vmid);
amdgpu_gfx_off_ctrl(adev, true);
}
@@ -8093,11 +7989,11 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
gfx_v10_cntl_power_gating(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
@@ -8152,8 +8048,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
+ if (!enable)
+ amdgpu_gfx_off_ctrl(adev, false);
+
gfx_v10_cntl_pg(adev, enable);
- amdgpu_gfx_off_ctrl(adev, enable);
+
+ if (enable)
+ amdgpu_gfx_off_ctrl(adev, true);
+
break;
default:
break;
@@ -8395,7 +8297,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
uint32_t flags)
{
- unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ unsigned int vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -8405,7 +8307,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
@@ -8436,7 +8338,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
uint32_t flags)
{
- unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ unsigned int vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
if (ring->is_mes_queue)
@@ -8471,7 +8373,7 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
}
static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
- u64 seq, unsigned flags)
+ u64 seq, unsigned int flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
@@ -8527,7 +8429,7 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
}
static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
if (ring->is_mes_queue)
gfx_v10_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
@@ -8580,7 +8482,7 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
{
uint32_t dw2 = 0;
- if (amdgpu_mcbp)
+ if (ring->adev->gfx.mcbp)
gfx_v10_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
@@ -8609,9 +8511,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0);
}
-static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
+static unsigned int gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
- unsigned ret;
+ unsigned int ret;
amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
@@ -8623,9 +8525,10 @@ static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
return ret;
}
-static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
+static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned int offset)
{
- unsigned cur;
+ unsigned int cur;
+
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
@@ -8640,7 +8543,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@@ -8848,7 +8751,7 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
}
static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
+ unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
@@ -8957,7 +8860,7 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
switch (type) {
@@ -9054,7 +8957,7 @@ static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
switch (state) {
@@ -9073,7 +8976,7 @@ static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
switch (state) {
@@ -9148,7 +9051,7 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+ struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
if (ring->me == 1)
target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
@@ -9192,7 +9095,7 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
u8 me_id, pipe_id, queue_id;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+ struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -9369,7 +9272,7 @@ static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v10_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
@@ -9403,8 +9306,8 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
- adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
+ adev->gfx.kiq[0].irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+ adev->gfx.kiq[0].irq.funcs = &gfx_v10_0_kiq_irq_funcs;
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
@@ -9440,7 +9343,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
- unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+ unsigned int total_cu = adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines;
@@ -9521,7 +9424,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
- unsigned disable_masks[4 * 2];
+ unsigned int disable_masks[4 * 2];
if (!adev || !cu_info)
return -EINVAL;
@@ -9541,7 +9444,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
@@ -9562,7 +9465,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
@@ -9638,8 +9541,7 @@ static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
(0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
}
-const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 10,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f5c376276984..5c3db694afa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -112,7 +112,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance);
+ u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
@@ -123,8 +123,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub, uint8_t dst_sel);
-static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
-static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
bool enable);
@@ -192,7 +192,7 @@ static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
- if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
+ if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
return;
}
@@ -260,7 +260,7 @@ static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
+ adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
@@ -463,6 +463,27 @@ out:
return err;
}
+static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ if ((adev->gfx.me_fw_version >= 1505) &&
+ (adev->gfx.pfp_fw_version >= 1600) &&
+ (adev->gfx.mec_fw_version >= 512)) {
+ if (amdgpu_sriov_vf(adev))
+ adev->gfx.cp_gfx_shadow = true;
+ else
+ adev->gfx.cp_gfx_shadow = false;
+ }
+ break;
+ default:
+ adev->gfx.cp_gfx_shadow = false;
+ break;
+ }
+}
+
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -539,6 +560,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
+ gfx_v11_0_check_fw_cp_gfx_shadow(adev);
out:
if (err) {
amdgpu_ucode_release(&adev->gfx.pfp_fw);
@@ -645,7 +667,7 @@ static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
- reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
@@ -699,7 +721,7 @@ static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
u32 *hpd;
size_t mec_hpd_size;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -747,7 +769,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}
-static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
* field when performing a select_se_sh so it should be
@@ -773,7 +795,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}
-static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
@@ -784,7 +806,7 @@ static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
dst);
}
-static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t start, uint32_t size,
uint32_t *dst)
@@ -795,11 +817,32 @@ static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
soc21_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ if (adev->gfx.cp_gfx_shadow) {
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ return 0;
+ } else {
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -ENOTSUPP;
+ }
+}
+
static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v11_0_select_se_sh,
@@ -808,6 +851,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -866,7 +910,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
else
ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
@@ -897,7 +941,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX11_MEC_HPD_SIZE);
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -1367,8 +1411,8 @@ static int gfx_v11_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
- j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v11_0_compute_ring_init(adev, ring_id,
@@ -1382,19 +1426,19 @@ static int gfx_v11_0_sw_init(void *handle)
}
if (!adev->enable_mes_kiq) {
- r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
}
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
if (r)
return r;
@@ -1456,11 +1500,11 @@ static int gfx_v11_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
if (!adev->enable_mes_kiq) {
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
}
gfx_v11_0_pfp_fini(adev);
@@ -1477,7 +1521,7 @@ static int gfx_v11_0_sw_fini(void *handle)
}
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -1598,6 +1642,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
/* Enable trap for each kfd vmid. */
data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -1667,7 +1712,7 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
soc21_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
@@ -3188,7 +3233,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- u32 i;
/* Set the write pointer delay */
WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
@@ -3280,11 +3324,6 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v11_0_cp_gfx_start(adev);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-
return 0;
}
@@ -3330,8 +3369,6 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
}
- adev->gfx.kiq.ring.sched.ready = enable;
-
udelay(50);
}
@@ -3633,55 +3670,6 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-#ifdef BRING_UP_DEBUG
-static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct v11_gfx_mqd *mqd = ring->mqd_ptr;
-
- /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
-
- /* set GFX_MQD_BASE */
- WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
- WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
-
- /* set GFX_MQD_CONTROL */
- WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
-
- /* set GFX_HQD_VMID to 0 */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
-
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY,
- mqd->cp_gfx_hqd_queue_priority);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
-
- /* set GFX_HQD_BASE, similar as CP_RB_BASE */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
-
- /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
-
- /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
-
- /* set RB_WPTR_POLL_ADDR */
- WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
- WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
-
- /* set RB_DOORBELL_CONTROL */
- WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
-
- /* active the queue */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
-
- return 0;
-}
-#endif
-
static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3693,59 +3681,23 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
amdgpu_ring_init_mqd(ring);
-#ifdef BRING_UP_DEBUG
- gfx_v11_0_gfx_queue_init_register(ring);
-#endif
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) {
- /* reset mqd with the backup copy */
+ } else {
+ /* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
amdgpu_ring_clear_ring(ring);
-#ifdef BRING_UP_DEBUG
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- gfx_v11_0_gfx_queue_init_register(ring);
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-#endif
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
}
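
The init-queue path above collapses from three branches to two: on first initialization the freshly built MQD is saved into a per-ring backup, and on every later call (resume or reset alike) the backup is copied back and the ring state is cleared. A minimal sketch of that backup/restore pattern, using placeholder types rather than the driver's v11_gfx_mqd layout:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Illustrative stand-ins, not the driver's real structures. */
struct mqd { uint32_t regs[64]; };

struct queue {
	struct mqd *mqd_ptr;     /* live, GPU-visible MQD */
	struct mqd *mqd_backup;  /* CPU copy taken at first init */
	uint64_t wptr;
};

static void queue_init(struct queue *q, bool first_init)
{
	if (first_init) {
		memset(q->mqd_ptr, 0, sizeof(*q->mqd_ptr));   /* build the MQD from scratch */
		if (q->mqd_backup)
			memcpy(q->mqd_backup, q->mqd_ptr, sizeof(*q->mqd_ptr));
	} else {
		if (q->mqd_backup)                            /* restore the known-good copy */
			memcpy(q->mqd_ptr, q->mqd_backup, sizeof(*q->mqd_ptr));
		q->wptr = 0;                                  /* and reset ring state */
	}
}
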
-#ifndef BRING_UP_DEBUG
-static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- int r, i;
-
- if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
- return -EINVAL;
-
- r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
- adev->gfx.num_gfx_rings);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
-
- return amdgpu_ring_test_helper(kiq_ring);
-}
-#endif
-
static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
@@ -3756,7 +3708,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
- goto done;
+ return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
@@ -3766,23 +3718,14 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
}
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
- goto done;
+ return r;
}
-#ifndef BRING_UP_DEBUG
- r = gfx_v11_0_kiq_enable_kgq(adev);
- if (r)
- goto done;
-#endif
- r = gfx_v11_0_cp_gfx_start(adev);
+
+ r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
- goto done;
+ return r;
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- ring->sched.ready = true;
- }
-done:
- return r;
+ return gfx_v11_0_cp_gfx_start(adev);
}
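
The BRING_UP_DEBUG-only gfx_v11_0_kiq_enable_kgq() helper is dropped in favour of the shared amdgpu_gfx_enable_kgq(adev, 0). The removed body above already shows the sequence involved; a standalone sketch of the same idea follows, with function pointers standing in for the kiq->pmf callbacks:

#include <stddef.h>

struct kiq_ops {
	int  (*ring_alloc)(void *kiq_ring, unsigned int ndw);
	void (*map_queue)(void *kiq_ring, void *gfx_ring);
	int  (*ring_test)(void *kiq_ring);
	unsigned int map_queues_size;   /* dwords per map-queues packet */
};

/* Sketch only: map every gfx queue through the KIQ, then verify the KIQ ring. */
static int enable_kgq(const struct kiq_ops *ops, void *kiq_ring,
		      void **gfx_rings, unsigned int num_rings)
{
	unsigned int i;
	int r;

	r = ops->ring_alloc(kiq_ring, ops->map_queues_size * num_rings);
	if (r)
		return r;

	for (i = 0; i < num_rings; i++)
		ops->map_queue(kiq_ring, gfx_rings[i]);

	return ops->ring_test(kiq_ring);
}
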
static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
@@ -4028,14 +3971,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v11_compute_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v11_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
@@ -4057,8 +3999,8 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}
return 0;
@@ -4080,17 +4022,14 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -4101,7 +4040,7 @@ static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@@ -4146,7 +4085,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = amdgpu_gfx_enable_kcq(adev);
+ r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
@@ -4239,7 +4178,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
false : true;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
- amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
return 0;
}
@@ -4407,48 +4346,20 @@ static int gfx_v11_0_hw_init(void *handle)
return r;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i, r = 0;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_gfx_rings))
- return -ENOMEM;
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
- PREEMPT_QUEUES, 0, 0);
-
- if (adev->gfx.kiq.ring.sched.ready)
- r = amdgpu_ring_test_helper(kiq_ring);
-
- return r;
-}
-#endif
-
static int gfx_v11_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
if (!adev->no_hw_access) {
-#ifndef BRING_UP_DEBUG
if (amdgpu_async_gfx_ring) {
- r = gfx_v11_0_kiq_disable_kgq(adev);
- if (r)
+ if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
-#endif
- if (amdgpu_gfx_disable_kcq(adev))
+
+ if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
amdgpu_mes_kiq_hw_fini(adev);
@@ -4525,7 +4436,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_set_safe_mode(adev);
+ gfx_v11_0_set_safe_mode(adev, 0);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
@@ -4625,7 +4536,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev);
+ gfx_v11_0_unset_safe_mode(adev, 0);
return gfx_v11_0_cp_resume(adev);
}
@@ -4667,24 +4578,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
- amdgpu_gfx_off_ctrl(adev, false);
- mutex_lock(&adev->gfx.gpu_clock_mutex);
if (amdgpu_sriov_vf(adev)) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
} else {
+ preempt_disable();
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ preempt_enable();
}
clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
- mutex_unlock(&adev->gfx.gpu_clock_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
+
return clock;
}
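
The reworked clock read keeps the high/low/high sampling order: if the upper 32 bits change between the two reads, the lower half is re-read, which guards against the low word wrapping mid-sample. A standalone model of that technique, where read_hi/read_lo stand in for the two RREG32_SOC15 calls:

#include <stdint.h>

/* Compose a 64-bit counter from two 32-bit halves that cannot be read atomically. */
static uint64_t read_counter64(uint32_t (*read_hi)(void), uint32_t (*read_lo)(void))
{
	uint32_t hi_pre, lo, hi_post;

	hi_pre  = read_hi();
	lo      = read_lo();
	hi_post = read_hi();
	if (hi_pre != hi_post)   /* low word wrapped between the reads */
		lo = read_lo();

	return ((uint64_t)hi_post << 32) | lo;
}
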
@@ -4740,26 +4654,6 @@ static int gfx_v11_0_early_init(void *handle)
return gfx_v11_0_init_microcode(adev);
}
-static int gfx_v11_0_ras_late_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if *gfx_common_if;
- int ret;
-
- gfx_common_if = kzalloc(sizeof(struct ras_common_if), GFP_KERNEL);
- if (!gfx_common_if)
- return -ENOMEM;
-
- gfx_common_if->block = AMDGPU_RAS_BLOCK__GFX;
-
- ret = amdgpu_ras_feature_enable(adev, gfx_common_if, true);
- if (ret)
- dev_warn(adev->dev, "Failed to enable gfx11 ras feature\n");
-
- kfree(gfx_common_if);
- return 0;
-}
-
static int gfx_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4773,12 +4667,6 @@ static int gfx_v11_0_late_init(void *handle)
if (r)
return r;
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
- r = gfx_v11_0_ras_late_init(handle);
- if (r)
- return r;
- }
-
return 0;
}
@@ -4791,7 +4679,7 @@ static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}
-static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -4810,7 +4698,7 @@ static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
}
@@ -5038,7 +4926,7 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
@@ -5058,7 +4946,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5126,11 +5014,11 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
gfx_v11_cntl_power_gating(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v11_0_set_powergating_state(void *handle,
@@ -5150,8 +5038,14 @@ static int gfx_v11_0_set_powergating_state(void *handle,
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
+ if (!enable)
+ amdgpu_gfx_off_ctrl(adev, false);
+
gfx_v11_cntl_pg(adev, enable);
- amdgpu_gfx_off_ctrl(adev, enable);
+
+ if (enable)
+ amdgpu_gfx_off_ctrl(adev, true);
+
break;
default:
break;
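
For GC 11.0.1/11.0.4 the GFXOFF control now brackets the power-gating update instead of trailing it: GFXOFF is blocked before PG is reprogrammed and only re-allowed once PG is enabled again, so the GFX block cannot power down in the middle of the transition. A sketch of that ordering, with gfxoff_allow()/program_pg() as placeholders for amdgpu_gfx_off_ctrl() and gfx_v11_cntl_pg():

#include <stdbool.h>

static void gfxoff_allow(bool allow) { (void)allow; }   /* placeholder */
static void program_pg(bool enable)  { (void)enable; }  /* placeholder */

static void set_powergating(bool enable)
{
	if (!enable)
		gfxoff_allow(false);   /* keep GFX awake while PG is turned off */

	program_pg(enable);

	if (enable)
		gfxoff_allow(true);    /* safe to let GFXOFF kick in again */
}
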
@@ -5395,7 +5289,7 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
@@ -5583,6 +5477,29 @@ static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0);
}
+static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
+ u64 shadow_va, u64 csa_va,
+ u64 gds_va, bool init_shadow,
+ int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (!adev->gfx.cp_gfx_shadow)
+ return;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
+ amdgpu_ring_write(ring, lower_32_bits(shadow_va));
+ amdgpu_ring_write(ring, upper_32_bits(shadow_va));
+ amdgpu_ring_write(ring, lower_32_bits(gds_va));
+ amdgpu_ring_write(ring, upper_32_bits(gds_va));
+ amdgpu_ring_write(ring, lower_32_bits(csa_va));
+ amdgpu_ring_write(ring, upper_32_bits(csa_va));
+ amdgpu_ring_write(ring, shadow_va ?
+ PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
+ amdgpu_ring_write(ring, init_shadow ?
+ PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
+}
+
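
The new emit_gfx_shadow callback writes a single nine-dword PACKET3_SET_Q_PREEMPTION_MODE packet (header plus eight payload dwords); the matching "9 +" added to emit_frame_size further down keeps the ring-space accounting correct. A small sketch of how the payload dwords are assembled, with vmid_field/init_field standing in for the driver's macro values:

#include <stdint.h>
#include <stdbool.h>

static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Payload dwords 1..8 of the packet above, in emission order. */
static void build_q_preemption_payload(uint32_t out[8],
				       uint64_t shadow_va, uint64_t gds_va,
				       uint64_t csa_va, uint32_t vmid_field,
				       bool init_shadow, uint32_t init_field)
{
	out[0] = lo32(shadow_va);
	out[1] = hi32(shadow_va);
	out[2] = lo32(gds_va);
	out[3] = hi32(gds_va);
	out[4] = lo32(csa_va);
	out[5] = hi32(csa_va);
	out[6] = shadow_va ? vmid_field : 0;
	out[7] = init_shadow ? init_field : 0;
}
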
static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -5614,7 +5531,7 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@@ -6082,7 +5999,7 @@ static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+ struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
target += ring->pipe;
@@ -6173,6 +6090,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
+ 9 + /* SET_Q_PREEMPTION_MODE */
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
@@ -6199,6 +6117,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
+ .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v11_0_ring_preempt_ib,
@@ -6279,7 +6198,7 @@ static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
@@ -6428,7 +6347,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
counter = 0;
- gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 8 && j < 2)
gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
@@ -6460,7 +6379,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
active_cu_number += counter;
}
}
- gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
index 068b9586a223..26d6286d86c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -84,8 +84,20 @@ static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
/* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. */
if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) &&
(entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) &&
- !entry->vmid && !entry->pasid)
+ !entry->vmid && !entry->pasid) {
+ uint32_t rlc_status0 = 0;
+
+ rlc_status0 = RREG32_SOC15(GC, 0, regRLC_RLCS_FED_STATUS_0);
+
+ if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) ||
+ REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR)) {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ }
+
amdgpu_ras_reset_gpu(adev);
+ }
return 0;
}
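
The poison-consumption handler now inspects RLC_RLCS_FED_STATUS_0 before resetting: if either SDMA FED bit is set, it requests a mode-2 reset through the RAS context before calling amdgpu_ras_reset_gpu(). A tiny sketch of that decision; the bit positions below are illustrative, not the real register layout:

#include <stdint.h>
#include <stdbool.h>

#define SDMA0_FED_ERR (1u << 0)   /* illustrative bit positions only */
#define SDMA1_FED_ERR (1u << 1)

/* A fatal-error interrupt with no vmid/pasid escalates to a GPU reset;
 * SDMA FED bits additionally request the lighter mode-2 reset. */
static bool wants_mode2_reset(uint32_t rlc_fed_status)
{
	return (rlc_fed_status & (SDMA0_FED_ERR | SDMA1_FED_ERR)) != 0;
}
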
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index c41219e23151..da6caff78c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1285,7 +1285,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -1438,12 +1438,12 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on SI */
- gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
}
/* GRBM_GFX_INDEX has a different offset on SI */
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
@@ -1459,14 +1459,14 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v6_0_get_rb_active_bitmap(adev);
active_rbs |= data <<
((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
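
The setup_rb hunk above shows the conversion applied throughout this series: every select_se_sh() call gains a trailing xcc_id argument, and single-die code paths simply pass 0. A standalone sketch of the resulting calling pattern (the GRBM_GFX_INDEX write itself is elided):

#include <stdint.h>

#define BROADCAST 0xffffffffu	/* the "all SEs / all SHs" value used above */

/* Stand-in for gfx_v*_select_se_sh(); the real function programs GRBM_GFX_INDEX. */
static void select_se_sh(uint32_t se, uint32_t sh, uint32_t instance, int xcc_id)
{
	(void)se; (void)sh; (void)instance; (void)xcc_id;
}

/* Iterate every SE/SA with an explicit xcc_id (0 on single-die parts),
 * then restore broadcast selection. */
static void walk_all_shader_arrays(uint32_t max_se, uint32_t max_sh)
{
	uint32_t i, j;

	for (i = 0; i < max_se; i++)
		for (j = 0; j < max_sh; j++)
			select_se_sh(i, j, BROADCAST, 0);   /* per-SA reads/writes here */

	select_se_sh(BROADCAST, BROADCAST, BROADCAST, 0);
}
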
@@ -1487,7 +1487,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -1496,7 +1496,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG);
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1535,7 +1535,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
active_cu = gfx_v6_0_get_cu_enabled(adev);
@@ -1550,7 +1550,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
}
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -2391,7 +2391,7 @@ static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
if (!enable) {
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmSPI_LB_CU_MASK, 0x00ff);
}
}
@@ -2968,7 +2968,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
*(out++) = RREG32(mmSQ_IND_DATA);
}
-static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* type 0 wave data */
dst[(*no_fields)++] = 0;
@@ -2993,7 +2993,7 @@ static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
-static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
@@ -3003,7 +3003,7 @@ static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
DRM_INFO("Not implemented\n");
}
@@ -3028,6 +3028,7 @@ static int gfx_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
GFX6_NUM_COMPUTE_RINGS);
@@ -3073,7 +3074,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
- r = amdgpu_ring_init(adev, ring, 1024,
+ r = amdgpu_ring_init(adev, ring, 2048,
&adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -3571,7 +3572,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
@@ -3593,7 +3594,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 9d5c1e29b4a3..90b034b173c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -90,8 +90,7 @@ MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
-static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
-{
+static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = {
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
@@ -110,8 +109,7 @@ static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
-static const u32 spectre_rlc_save_restore_register_list[] =
-{
+static const u32 spectre_rlc_save_restore_register_list[] = {
(0x0e00 << 16) | (0xc12c >> 2),
0x00000000,
(0x0e00 << 16) | (0xc140 >> 2),
@@ -557,8 +555,7 @@ static const u32 spectre_rlc_save_restore_register_list[] =
(0x0e00 << 16) | (0x9600 >> 2),
};
-static const u32 kalindi_rlc_save_restore_register_list[] =
-{
+static const u32 kalindi_rlc_save_restore_register_list[] = {
(0x0e00 << 16) | (0xc12c >> 2),
0x00000000,
(0x0e00 << 16) | (0xc140 >> 2),
@@ -933,7 +930,8 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_MULLINS:
chip_name = "mullins";
break;
- default: BUG();
+ default:
+ BUG();
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
@@ -1548,11 +1546,12 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
* @sh_num: sh block to address
* @instance: Certain registers are instanced per SE or SH.
* 0xffffffff means broadcast to all SEs or SHs (CIK).
- *
+ * @xcc_id: xcc accelerated compute core id
* Select which SE, SH combinations to address.
*/
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num, u32 instance)
+ u32 se_num, u32 sh_num, u32 instance,
+ int xcc_id)
{
u32 data;
@@ -1732,13 +1731,13 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on CI+ */
- gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on CI+ */
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
/**
@@ -1761,13 +1760,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v7_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -1790,7 +1789,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -1801,7 +1800,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG_1);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1911,7 +1910,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2728,7 +2727,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
u32 *hpd;
size_t mec_hpd_size;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -2758,8 +2757,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
return 0;
}
-struct hqd_registers
-{
+struct hqd_registers {
u32 cp_mqd_base_addr;
u32 cp_mqd_base_addr_hi;
u32 cp_hqd_active;
@@ -3301,7 +3299,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3309,7 +3307,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3361,7 +3359,7 @@ static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
u32 tmp, i, mask;
@@ -3383,7 +3381,7 @@ static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
u32 tmp;
@@ -3474,7 +3472,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(mmRLC_LB_PARAMS, 0x00600408);
WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3530,7 +3528,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3584,7 +3582,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3635,7 +3633,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -4111,7 +4109,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
*(out++) = RREG32(mmSQ_IND_DATA);
}
-static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* type 0 wave data */
dst[(*no_fields)++] = 0;
@@ -4136,7 +4134,7 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
-static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
@@ -4146,7 +4144,7 @@ static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
cik_srbm_select(adev, me, pipe, q, vm);
}
@@ -4178,6 +4176,7 @@ static int gfx_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
@@ -4456,7 +4455,8 @@ static int gfx_v7_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v7_0_compute_ring_init(adev,
@@ -5114,18 +5114,18 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v7_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
- for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
if (counter < ao_cu_num)
ao_bitmap |= mask;
- counter ++;
+ counter++;
}
mask <<= 1;
}
@@ -5135,7 +5135,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
@@ -5147,8 +5147,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->lds_size = 64;
}
-const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 7,
.minor = 1,
@@ -5156,8 +5155,7 @@ const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
.funcs = &gfx_v7_0_ip_funcs,
};
-const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_2_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 7,
.minor = 2,
@@ -5165,8 +5163,7 @@ const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
.funcs = &gfx_v7_0_ip_funcs,
};
-const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
-{
+const struct amdgpu_ip_block_version gfx_v7_3_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 7,
.minor = 3,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b1f2684d854a..51c1745c8369 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1304,7 +1304,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
u32 *hpd;
size_t mec_hpd_size;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -2001,7 +2001,8 @@ static int gfx_v8_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v8_0_compute_ring_init(adev,
@@ -2015,19 +2016,19 @@ static int gfx_v8_0_sw_init(void *handle)
}
}
- r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
if (r)
return r;
@@ -2050,9 +2051,9 @@ static int gfx_v8_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
gfx_v8_0_mec_fini(adev);
amdgpu_gfx_rlc_fini(adev);
@@ -3394,7 +3395,8 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num, u32 instance)
+ u32 se_num, u32 sh_num, u32 instance,
+ int xcc_id)
{
u32 data;
@@ -3417,7 +3419,7 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
}
static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
vi_srbm_select(adev, me, pipe, q, vm);
}
@@ -3578,13 +3580,13 @@ gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on VI */
- gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on VI */
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
@@ -3600,13 +3602,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -3629,7 +3631,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -3640,7 +3642,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG_1);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -3787,7 +3789,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3818,7 +3820,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3826,7 +3828,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
if (k == adev->usec_timeout) {
gfx_v8_0_select_se_sh(adev, 0xffffffff,
- 0xffffffff, 0xffffffff);
+ 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
@@ -3834,7 +3836,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -4281,7 +4283,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
- ring->sched.ready = true;
return 0;
}
@@ -4292,7 +4293,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@@ -4314,12 +4315,12 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
uint64_t queue_mask = 0;
int r, i;
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+ if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
continue;
/* This situation may be hit in the future if a new HW
@@ -4595,14 +4596,13 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v8_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -4625,8 +4625,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
}
return 0;
@@ -4650,15 +4650,13 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
}
@@ -4678,21 +4676,22 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v8_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
return 0;
}
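
The kiq_resume fix adds the missing unreserve on the kmap-failure path, so the buffer reservation is no longer leaked when mapping fails. A standalone sketch of the reserve/map/unmap/unreserve ladder with the corrected error handling (the obj_* helpers are placeholders for the amdgpu_bo_* calls):

static int  obj_reserve(void *obj)        { (void)obj; return 0; }
static int  obj_map(void *obj, void **p)  { (void)obj; *p = 0; return 0; }
static void obj_unmap(void *obj)          { (void)obj; }
static void obj_unreserve(void *obj)      { (void)obj; }

/* Every exit path after a successful reserve must unreserve,
 * including the map-failure path restored above. */
static int init_with_mapping(void *obj)
{
	void *ptr;
	int r;

	r = obj_reserve(obj);
	if (r)
		return r;

	r = obj_map(obj, &ptr);
	if (r) {
		obj_unreserve(obj);   /* previously this return leaked the reservation */
		return r;
	}

	/* ... initialize via ptr ... */
	(void)ptr;

	obj_unmap(obj);
	obj_unreserve(obj);
	return 0;
}
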
@@ -4741,7 +4740,7 @@ static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
if (r)
return r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -4808,7 +4807,7 @@ static int gfx_v8_0_hw_init(void *handle)
static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
{
int r, i;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
if (r)
@@ -4902,7 +4901,7 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (!gfx_v8_0_wait_for_idle(adev))
gfx_v8_0_cp_enable(adev, false);
else
@@ -4911,7 +4910,7 @@ static int gfx_v8_0_hw_fini(void *handle)
adev->gfx.rlc.funcs->stop(adev);
else
pr_err("rlc is busy, skip halt rlc\n");
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5216,7 +5215,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
*(out++) = RREG32(mmSQ_IND_DATA);
}
-static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* type 0 wave data */
dst[(*no_fields)++] = 0;
@@ -5241,7 +5240,7 @@ static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
-static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
@@ -5263,6 +5262,7 @@ static int gfx_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
@@ -5376,7 +5376,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5430,7 +5430,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5481,7 +5481,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
{
uint32_t data;
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5535,7 +5535,7 @@ static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -5562,7 +5562,7 @@ static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -5621,7 +5621,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5717,7 +5717,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5727,7 +5727,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5810,7 +5810,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
@@ -6723,11 +6723,11 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
*/
if (from_wq) {
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
+ gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);
sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -7001,7 +7001,7 @@ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
@@ -7116,7 +7116,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v8_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
@@ -7137,7 +7137,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f46d4b18a3fa..458faf657042 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven 0x007a
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven 0x007b
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0
-
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2 0x0068
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2 0x0069
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0
-
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -765,13 +755,15 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
- void *inject_if);
+ void *inject_if, uint32_t instance_mask);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
+static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+ unsigned int vmid);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
@@ -898,7 +890,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+ adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
@@ -1504,7 +1496,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
mask = 1;
cu_bitmap = 0;
counter = 0;
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (cu_info->bitmap[i][j] & mask) {
@@ -1523,7 +1515,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1545,7 +1537,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1594,7 +1586,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1642,7 +1634,7 @@ static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
- reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
@@ -1677,22 +1669,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 2, 2):
- case IP_VERSION(9, 1, 0):
- gfx_v9_0_init_lbpw(adev);
- break;
- case IP_VERSION(9, 4, 0):
- gfx_v9_4_init_lbpw(adev);
- break;
- default:
- break;
- }
-
- /* init spm vmid with 0xf */
- if (adev->gfx.rlc.funcs->update_spm_vmid)
- adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
-
return 0;
}
@@ -1713,7 +1689,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
const struct gfx_firmware_header_v1_0 *mec_hdr;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -1788,7 +1764,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
-static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
+static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* type 1 wave data */
dst[(*no_fields)++] = 1;
@@ -1809,7 +1785,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
-static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
@@ -1818,7 +1794,7 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
-static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t start, uint32_t size,
uint32_t *dst)
@@ -1829,9 +1805,9 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
}
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
- soc15_grbm_select(adev, me, pipe, q, vm);
+ soc15_grbm_select(adev, me, pipe, q, vm, 0);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
@@ -2005,7 +1981,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -2105,7 +2081,7 @@ static int gfx_v9_0_sw_init(void *handle)
/* disable scheduler on the real ring */
ring->no_scheduler = true;
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -2123,7 +2099,7 @@ static int gfx_v9_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring->is_sw_ring = true;
hw_prio = amdgpu_sw_ring_priority(i);
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
NULL);
@@ -2154,7 +2130,8 @@ static int gfx_v9_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v9_0_compute_ring_init(adev,
@@ -2168,19 +2145,19 @@ static int gfx_v9_0_sw_init(void *handle)
}
}
- r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
if (r)
return r;
@@ -2215,9 +2192,9 @@ static int gfx_v9_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
gfx_v9_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
@@ -2240,7 +2217,7 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
- u32 instance)
+ u32 instance, int xcc_id)
{
u32 data;
@@ -2289,19 +2266,42 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v9_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
}
+static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
+ uint32_t first_vmid,
+ uint32_t last_vmid)
+{
+ uint32_t data;
+ uint32_t trap_config_vmid_mask = 0;
+ int i;
+
+ /* Calculate trap config vmid mask */
+ for (i = first_vmid; i < last_vmid; i++)
+ trap_config_vmid_mask |= (1 << i);
+
+ data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
+ VMID_SEL, trap_config_vmid_mask);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
+ TRAP_EN, 1);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
+}
+
#define DEFAULT_SH_MEM_BASES (0x6000)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
@@ -2323,12 +2323,12 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_lock(&adev->srbm_mutex);
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
+ soc15_grbm_select(adev, 0, 0, 0, i, 0);
/* CP and shaders */
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
}
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
@@ -2366,8 +2366,8 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
- tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
- DISABLE_BARRIER_WAITCNT, 1);
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
+ !READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
default:
@@ -2392,8 +2392,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
+ soc15_grbm_select(adev, 0, 0, 0, i, 0);
/* CP and shaders */
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
@@ -2415,7 +2415,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
}
}
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -2432,7 +2432,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -2440,7 +2440,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
if (k == adev->usec_timeout) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff,
- 0xffffffff, 0xffffffff);
+ 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
@@ -2448,7 +2448,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -2928,12 +2928,14 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
+ gfx_v9_0_init_lbpw(adev);
if (amdgpu_lbpw == 0)
gfx_v9_0_enable_lbpw(adev, false);
else
gfx_v9_0_enable_lbpw(adev, true);
break;
case IP_VERSION(9, 4, 0):
+ gfx_v9_4_init_lbpw(adev);
if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
@@ -2943,6 +2945,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
break;
}
+ gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
+
adev->gfx.rlc.funcs->start(adev);
return 0;
@@ -3143,7 +3147,6 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v9_0_cp_gfx_start(adev);
- ring->sched.ready = true;
return 0;
}
@@ -3155,7 +3158,7 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@@ -3519,7 +3522,6 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
struct v9_mqd *tmp_mqd;
gfx_v9_0_kiq_setting(ring);
@@ -3529,20 +3531,20 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
* driver need to re-init the mqd.
* check mqd->cp_hqd_pq_control since this value should not be 0
*/
- tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+ tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){
/* for GPU_RESET case , reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_kiq_init_register(ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
@@ -3551,14 +3553,14 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev) && adev->in_suspend)
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_mqd_init(ring);
gfx_v9_0_kiq_init_register(ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
@@ -3582,24 +3584,21 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_mqd_init(ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
-
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -3610,21 +3609,22 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v9_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->sched.ready = true;
return 0;
}
@@ -3652,7 +3652,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = amdgpu_gfx_enable_kcq(adev);
+ r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
@@ -3772,7 +3772,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
/* disable KCQ to avoid CPC touch memory not valid anymore */
- amdgpu_gfx_disable_kcq(adev);
+ amdgpu_gfx_disable_kcq(adev, 0);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3790,11 +3790,11 @@ static int gfx_v9_0_hw_fini(void *handle)
*/
if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue, 0);
- gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
+ adev->gfx.kiq[0].ring.pipe,
+ adev->gfx.kiq[0].ring.queue, 0, 0);
+ gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
@@ -3914,7 +3914,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
unsigned long flags;
uint32_t seq, reg_val_offs = 0;
uint64_t value = 0;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
@@ -4002,36 +4002,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
- case IP_VERSION(9, 1, 0):
- preempt_disable();
- clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
- hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
- /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
- * roughly every 42 seconds.
- */
- if (hi_check != clock_hi) {
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
- clock_hi = hi_check;
- }
- preempt_enable();
- clock = clock_lo | (clock_hi << 32ULL);
- break;
- case IP_VERSION(9, 2, 2):
- preempt_disable();
- clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
- hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
- /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
- * roughly every 42 seconds.
- */
- if (hi_check != clock_hi) {
- clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
- clock_hi = hi_check;
- }
- preempt_enable();
- clock = clock_lo | (clock_hi << 32ULL);
- break;
default:
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
@@ -4544,6 +4514,7 @@ static int gfx_v9_0_early_init(void *handle)
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
+ adev->gfx.xcc_mask = 1;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
gfx_v9_0_set_kiq_pm4_funcs(adev);
@@ -4609,6 +4580,13 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ gfx_v9_4_2_debug_trap_config_init(adev,
+ adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
+ else
+ gfx_v9_0_debug_trap_config_init(adev,
+ adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
+
return 0;
}
@@ -4624,7 +4602,7 @@ static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -4641,7 +4619,7 @@ static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
@@ -4652,7 +4630,7 @@ static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -4664,7 +4642,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -4691,7 +4669,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -4758,7 +4736,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
}
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -4769,7 +4747,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (!adev->gfx.num_gfx_rings)
return;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* Enable 3D CGCG/CGLS */
if (enable) {
@@ -4813,7 +4791,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -4821,7 +4799,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -4865,7 +4843,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -4893,12 +4871,11 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
+ unsigned int vmid)
{
u32 reg, data;
- amdgpu_gfx_off_ctrl(adev, false);
-
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -4912,6 +4889,13 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+{
+ amdgpu_gfx_off_ctrl(adev, false);
+
+ gfx_v9_0_update_spm_vmid_internal(adev, vmid);
amdgpu_gfx_off_ctrl(adev, true);
}
@@ -5165,7 +5149,8 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
gfx_v9_0_ring_emit_de_meta(ring,
(!amdgpu_sriov_vf(ring->adev) &&
flags & AMDGPU_IB_PREEMPTED) ?
- true : false);
+ true : false,
+ job->gds_size > 0 && job->gds_base != 0);
}
amdgpu_ring_write(ring, header);
@@ -5176,9 +5161,86 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
#endif
lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_ib_on_emit_cntl(ring);
amdgpu_ring_write(ring, control);
}
+static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
+ unsigned offset)
+{
+ u32 control = ring->ring[offset];
+
+ control |= INDIRECT_BUFFER_PRE_RESUME(1);
+ ring->ring[offset] = control;
+}
+
+static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
+ unsigned offset)
+{
+ struct amdgpu_device *adev = ring->adev;
+ void *ce_payload_cpu_addr;
+ uint64_t payload_offset, payload_size;
+
+ payload_size = sizeof(struct v9_ce_ib_state);
+
+ if (ring->is_mes_queue) {
+ payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+ gfx[0].gfx_meta_data) +
+ offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr =
+ amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
+ } else {
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+ }
+
+ if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+ memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
+ } else {
+ memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
+ (ring->buf_mask + 1 - offset) << 2);
+ payload_size -= (ring->buf_mask + 1 - offset) << 2;
+ memcpy((void *)&ring->ring[0],
+ ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
+ payload_size);
+ }
+}
+
+static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
+ unsigned offset)
+{
+ struct amdgpu_device *adev = ring->adev;
+ void *de_payload_cpu_addr;
+ uint64_t payload_offset, payload_size;
+
+ payload_size = sizeof(struct v9_de_ib_state);
+
+ if (ring->is_mes_queue) {
+ payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+ gfx[0].gfx_meta_data) +
+ offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr =
+ amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
+ } else {
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
+ }
+
+ ((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
+ IB_COMPLETION_STATUS_PREEMPTED;
+
+ if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
+ memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
+ } else {
+ memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
+ (ring->buf_mask + 1 - offset) << 2);
+ payload_size -= (ring->buf_mask + 1 - offset) << 2;
+ memcpy((void *)&ring->ring[0],
+ de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
+ payload_size);
+ }
+}
+
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
@@ -5374,6 +5436,8 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
+ amdgpu_ring_ib_on_emit_ce(ring);
+
if (resume)
amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
sizeof(ce_payload) >> 2);
@@ -5386,7 +5450,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@@ -5407,10 +5471,6 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
amdgpu_ring_alloc(ring, 13);
gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
- /*reset the CP_VMID_PREEMPT after trailing fence*/
- amdgpu_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
- 0x0);
/* assert IB preemption, emit the trailing fence */
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
@@ -5433,6 +5493,10 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
}
+ /*reset the CP_VMID_PREEMPT after trailing fence*/
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
+ 0x0);
amdgpu_ring_commit(ring);
/* deassert preemption condition */
@@ -5440,7 +5504,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
{
struct amdgpu_device *adev = ring->adev;
struct v9_de_ib_state de_payload = {0};
@@ -5471,8 +5535,10 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
PAGE_SIZE);
}
- de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
- de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
+ if (usegds) {
+ de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
+ de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
+ }
cnt = (sizeof(de_payload) >> 2) + 4 - 2;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
@@ -5483,6 +5549,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
+ amdgpu_ring_ib_on_emit_de(ring);
if (resume)
amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
sizeof(de_payload) >> 2);
@@ -6342,7 +6409,7 @@ static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
};
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
- void *inject_if)
+ void *inject_if, uint32_t instance_mask)
{
struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
int ret;
@@ -6381,7 +6448,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
block_info.value = info->value;
mutex_lock(&adev->grbm_idx_mutex);
- ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
mutex_unlock(&adev->grbm_idx_mutex);
return ret;
@@ -6609,7 +6676,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
- amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
+ amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
}
}
@@ -6671,7 +6738,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
- amdgpu_gfx_select_se_sh(adev, j, 0, k);
+ amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
@@ -6686,7 +6753,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_query_utc_edc_status(adev, err_data);
@@ -6893,6 +6960,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
+ .patch_cntl = gfx_v9_0_ring_patch_cntl,
+ .patch_de = gfx_v9_0_ring_patch_de_meta,
+ .patch_ce = gfx_v9_0_ring_patch_ce_meta,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6965,7 +7035,7 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
@@ -7146,7 +7216,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
mask = 1;
ao_bitmap = 0;
counter = 0;
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
gfx_v9_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
@@ -7179,7 +7249,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index dfe8d4841f58..f9f6edc5e558 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -27,6 +27,6 @@
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
- u32 instance);
+ u32 instance, int xcc_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index c67e387a97f5..bc8416afb62c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -970,29 +970,6 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
}
-static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
- void *inject_if)
-{
- struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
- int ret;
- struct ta_ras_trigger_error_input block_info = { 0 };
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return -EINVAL;
-
- block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
- block_info.sub_block_index = info->head.sub_block_index;
- block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
- block_info.address = info->address;
- block_info.value = info->value;
-
- mutex_lock(&adev->grbm_idx_mutex);
- ret = psp_ras_trigger_error(&adev->psp, &block_info);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return ret;
-}
-
static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
@@ -1030,7 +1007,6 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = {
- .ras_error_inject = &gfx_v9_4_ras_error_inject,
.query_ras_error_count = &gfx_v9_4_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
.query_ras_error_status = &gfx_v9_4_query_ras_error_status,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 3a797424579c..63f6843a069e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -761,7 +761,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
for (i = first_vmid; i < last_vmid; i++) {
data = 0;
- soc15_grbm_select(adev, 0, 0, 0, i);
+ soc15_grbm_select(adev, 0, 0, 0, i, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
@@ -769,15 +769,18 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
}
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA0), 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA1), 0);
}
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
{
u32 tmp;
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
tmp = 0;
tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
@@ -1699,28 +1702,6 @@ static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
}
-static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
-{
- struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
- int ret;
- struct ta_ras_trigger_error_input block_info = { 0 };
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
- return -EINVAL;
-
- block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
- block_info.sub_block_index = info->head.sub_block_index;
- block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
- block_info.address = info->address;
- block_info.value = info->value;
-
- mutex_lock(&adev->grbm_idx_mutex);
- ret = psp_ras_trigger_error(&adev->psp, &block_info);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return ret;
-}
-
static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
{
uint32_t i, j;
@@ -1935,7 +1916,7 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
u32 status = 0;
struct amdgpu_vmhub *hub;
- hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
status = RREG32(hub->vm_l2_pro_fault_status);
/* reset page fault status */
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -1944,7 +1925,6 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
}
struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
- .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
.query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 5f8500577c02..57ed4e5c294c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -25,35 +25,500 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"
+#include "v9_structs.h"
+
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
+#include "amdgpu_xcp.h"
+
+MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
+#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
+#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
+
+struct amdgpu_gfx_ras gfx_v9_4_3_ras;
+
+static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
+static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
+static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ struct amdgpu_cu_info *cu_info);
+
+static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ /* vmid_mask:0, queue_type:0 (KIQ) */
+ PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+ amdgpu_ring_write(kiq_ring,
+ lower_32_bits(queue_mask)); /* queue mask lo */
+ amdgpu_ring_write(kiq_ring,
+ upper_32_bits(queue_mask)); /* queue mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* oac mask */
+ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+ PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+ PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+ /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+ /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+ PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+ /* num_queues: must be 1 */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
+static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(action) |
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+ if (action == PREEMPT_QUEUES_NO_UNMAP) {
+ amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, seq);
+ } else {
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+}
+
+static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+ PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+ PACKET3_QUERY_STATUS_COMMAND(2));
+ /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+ PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
+ .kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
+ .kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
+ .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
+ .kiq_query_status = gfx_v9_4_3_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
+ .set_resources_size = 8,
+ .map_queues_size = 7,
+ .unmap_queues_size = 6,
+ .query_status_size = 7,
+ .invalidate_tlbs_size = 2,
+};
+
+static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
+ adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
+}
+
+static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
+{
+ int i, num_xcc, dev_inst;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ dev_inst = GET_INST(GC, i);
+
+ WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
+ GOLDEN_GB_ADDR_CONFIG);
+ /* Golden settings applied by driver for ASIC with rev_id 0 */
+ if (adev->rev_id == 0) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
+ REDUCE_FIFO_DEPTH_BY_2, 2);
+ }
+ }
+}
+
+static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
+ bool wc, uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
+ WRITE_DATA_DST_SEL(0) |
+ (wc ? WR_CONFIRM : 0));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
+
+static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
+{
+ uint32_t scratch_reg0_offset, xcc_offset;
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ /* Use register offset which is local to XCC in the packet */
+ xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
+ scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
+ WREG32(scratch_reg0_offset, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r)
+ return r;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch_reg0_offset);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ return r;
+}
+
+static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
+ long r;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err2;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto err2;
+ } else if (r < 0) {
+ goto err2;
+ }
+
+ tmp = adev->wb.wb[index];
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
+
+err2:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+err1:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+
+/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
- amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
-static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev,
- u32 se_num,
- u32 sh_num,
- u32 instance)
+static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
+{
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
+static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
+{
+ char fw_name[30];
+ int err;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ uint16_t version_major;
+ uint16_t version_minor;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ if (err)
+ goto out;
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+
+ return err;
+}
+
+static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
+{
+ return true;
+}
+
+static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
+{
+ if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+}
+
+static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
+{
+ char fw_name[30];
+ int err;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ if (err)
+ goto out;
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
+
+ adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
+ adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
+
+ gfx_v9_4_3_check_if_need_gfxoff(adev);
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ return err;
+}
+
+static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ int r;
+
+ chip_name = "gc_9_4_3";
+
+ r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
+ if (r)
+ return r;
+
+ r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
+}
+
+static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
+{
+ int r, i, num_xcc;
+ u32 *hpd;
+ const __le32 *fw_data;
+ unsigned fw_size;
+ u32 *fw;
+ size_t mec_hpd_size;
+
+ const struct gfx_firmware_header_v1_0 *mec_hdr;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
+ bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
+ AMDGPU_MAX_COMPUTE_QUEUES);
+
+ /* take ownership of the relevant compute queues */
+ amdgpu_gfx_compute_queue_acquire(adev);
+ mec_hpd_size =
+ adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
+ gfx_v9_4_3_mec_fini(adev);
+ return r;
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ for (i = 0; i < mec_hpd_size / 4; i++) {
+ memset((void *)(hpd + i), 0, 4);
+ if (i % 50 == 0)
+ msleep(1);
+ }
+ } else {
+ memset(hpd, 0, mec_hpd_size);
+ }
+
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
+
+ mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
+
+ r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.mec_fw_obj,
+ &adev->gfx.mec.mec_fw_gpu_addr,
+ (void **)&fw);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
+ gfx_v9_4_3_mec_fini(adev);
+ return r;
+ }
+
+ memcpy(fw, fw_data, fw_size);
+
+ amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
+
+ return 0;
+}
+
+static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -76,24 +541,24 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev,
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
+ WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}
-static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK));
- return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
+ return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}
-static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
+static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32_SOC15_RLC(GC, 0, regSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -101,53 +566,478 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
(SQ_IND_INDEX__FORCE_READ_MASK) |
(SQ_IND_INDEX__AUTO_INCR_MASK));
while (num--)
- *(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
+ *(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}
static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
- uint32_t simd, uint32_t wave,
+ uint32_t xcc_id, uint32_t simd, uint32_t wave,
uint32_t *dst, int *no_fields)
{
/* type 1 wave data */
dst[(*no_fields)++] = 1;
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
- dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
-}
-
-static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
+ dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
+}
+
+static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
- wave_read_regs(adev, simd, wave, 0,
+ wave_read_regs(adev, xcc_id, simd, wave, 0,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
-static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t start, uint32_t size,
uint32_t *dst)
{
- wave_read_regs(adev, simd, wave, thread,
+ wave_read_regs(adev, xcc_id, simd, wave, thread,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 q, u32 vm)
+ u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
+{
+ soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
+}
+
+
+static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
+ int num_xccs_per_xcp)
+{
+ int ret, i, num_xcc;
+ u32 tmp = 0, regval;
+
+ if (adev->psp.funcs) {
+ ret = psp_spatial_partition(&adev->psp,
+ NUM_XCC(adev->gfx.xcc_mask) /
+ num_xccs_per_xcp);
+ if (ret)
+ return ret;
+ }
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ for (i = 0; i < num_xcc; i++) {
+ tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
+ num_xccs_per_xcp);
+ tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
+ i % num_xccs_per_xcp);
+ regval = RREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL);
+ if (regval != tmp)
+ WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
+ tmp);
+ }
+
+ adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
+
+ return 0;
+}
+
+static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
+{
+ int xcc;
+
+ xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
+ if (!xcc) {
+ dev_err(adev->dev, "Couldn't find xcc mapping from IH node");
+ return -EINVAL;
+ }
+
+ return xcc - 1;
+}
+
+static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
+ .read_wave_data = &gfx_v9_4_3_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
+ .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
+ .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
+};
+
+static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
- soc15_grbm_select(adev, me, pipe, q, vm);
+ u32 gb_addr_config;
+
+ adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
+ adev->gfx.ras = &gfx_v9_4_3_ras;
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ adev->gfx.config.gb_addr_config = gb_addr_config;
+
+ adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_PIPES);
+
+ adev->gfx.config.max_tile_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+
+ adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_BANKS);
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ MAX_COMPRESSED_FRAGS);
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_RB_PER_SE);
+ adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_SHADER_ENGINES);
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ PIPE_INTERLEAVE_SIZE));
+
+ return 0;
+}
+
+static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ int xcc_id, int mec, int pipe, int queue)
+{
+ unsigned irq_type;
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
+ uint32_t xcc_doorbell_start;
+
+ ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
+ ring_id];
+
+ /* mec0 is me1 */
+ ring->xcc_id = xcc_id;
+ ring->me = mec + 1;
+ ring->pipe = pipe;
+ ring->queue = queue;
+
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
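+ /* Each XCC gets its own doorbell window (xcc_doorbell_range); doorbell_index
+ * values are in 64-bit units, so shift left by one for the dword index used
+ * by the ring. */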
+ xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
+ xcc_id * adev->doorbell_index.xcc_doorbell_range;
+ ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
+ ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
+ (ring_id + xcc_id * adev->gfx.num_compute_rings) *
+ GFX9_MEC_HPD_SIZE;
+ ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
+ sprintf(ring->name, "comp_%d.%d.%d.%d",
+ ring->xcc_id, ring->me, ring->pipe, ring->queue);
+
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ + ring->pipe;
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ /* type-2 packets are deprecated on MEC, use type-3 instead */
+ return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
+}
+
+static int gfx_v9_4_3_sw_init(void *handle)
+{
+ int i, j, k, r, ring_id, xcc_id, num_xcc;
+ struct amdgpu_kiq *kiq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->gfx.mec.num_mec = 2;
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ /* EOP Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
+ &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
+ &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+ r = adev->gfx.rlc.funcs->init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
+ r = gfx_v9_4_3_mec_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init MEC BOs!\n");
+ return r;
+ }
+
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ ring_id = 0;
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
+ k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(
+ adev, xcc_id, i, k, j))
+ continue;
+
+ r = gfx_v9_4_3_compute_ring_init(adev,
+ ring_id,
+ xcc_id,
+ i, k, j);
+ if (r)
+ return r;
+
+ ring_id++;
+ }
+ }
+ }
+
+ r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
+ if (r) {
+ DRM_ERROR("Failed to init KIQ BOs!\n");
+ return r;
+ }
+
+ kiq = &adev->gfx.kiq[xcc_id];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id);
+ if (r)
+ return r;
+
+ /* create MQD for all compute queues as well as KIQ for the SRIOV case */
+ r = amdgpu_gfx_mqd_sw_init(adev,
+ sizeof(struct v9_mqd_allocation), xcc_id);
+ if (r)
+ return r;
+ }
+
+ r = gfx_v9_4_3_gpu_early_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+
+ return amdgpu_gfx_ras_sw_init(adev);
+}
+
+static int gfx_v9_4_3_sw_fini(void *handle)
+{
+ int i, num_xcc;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ for (i = 0; i < num_xcc; i++) {
+ amdgpu_gfx_mqd_sw_fini(adev, i);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
+ amdgpu_gfx_kiq_fini(adev, i);
+ }
+
+ gfx_v9_4_3_mec_fini(adev);
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ gfx_v9_4_3_free_microcode(adev);
+ amdgpu_gfx_sysfs_fini(adev);
+
+ return 0;
+}
+
+#define DEFAULT_SH_MEM_BASES (0x6000)
+static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ int i;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+ uint32_t data;
+
+ /*
+ * Configure apertures:
+ * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
+ * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
+ * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (256TB)
+ */
+ sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+
+ sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+
+ mutex_lock(&adev->srbm_mutex);
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
+ soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
+ /* CP and shaders */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
+
+ /* Enable trap for each kfd vmid. */
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
+ }
+}
+
+static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
+{
+ int vmid;
+
+ /*
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+ * access. Compute VMIDs should be enabled by FW for target VMIDs,
+ * the driver can enable them for graphics. VMID0 should maintain
+ * access so that HWS firmware can save/restore entries.
+ */
+ for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
+ }
+}
+
+static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ u32 tmp;
+ int i;
+
+ /* XXX SH_MEM regs */
+ /* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&adev->srbm_mutex);
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
+ soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
+ /* CP and shaders */
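+ /* VMID 0 keeps SH_MEM_BASES at zero; the other VMIDs get the private/shared aperture bases. */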
+ if (i == 0) {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
+ !!adev->gmc.noretry);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
+ regSH_MEM_CONFIG, tmp);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
+ regSH_MEM_BASES, 0);
+ } else {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
+ !!adev->gmc.noretry);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
+ regSH_MEM_CONFIG, tmp);
+ tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
+ (adev->gmc.private_aperture_start >>
+ 48));
+ tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
+ (adev->gmc.shared_aperture_start >>
+ 48));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
+ regSH_MEM_BASES, tmp);
+ }
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
+
+ mutex_unlock(&adev->srbm_mutex);
+
+ gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
+ gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
+}
+
+static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
+{
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
+ adev->gfx.config.db_debug2 =
+ RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
+
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_constants_init(adev, i);
+}
+
+static void
+gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
+}
+
+static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
+{
+ /*
+ * The RLC save/restore list is usable since RLC v2_1
+ * and is needed by the gfxoff feature.
+ */
+ if (adev->gfx.rlc.is_rlc_v2_1)
+ gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
+}
+
+static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
+ data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}
static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
@@ -155,36 +1045,55 @@ static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
uint32_t rlc_setting;
/* if RLC is not enabled, do nothing */
- rlc_setting = RREG32_SOC15(GC, 0, regRLC_CNTL);
+ rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
return false;
return true;
}
-static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
}
-static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
+ int xcc_id)
{
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
- WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
+}
+
+static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+ int xcc_id, num_xcc;
+ struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
+ reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
+ reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
+ reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
+ reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
+ reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
+ reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
+ reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
+ }
}
static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
@@ -196,7 +1105,8 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
+static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
+ int xcc_id)
{
u32 i, j, k;
u32 mask;
@@ -204,15 +1114,17 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
+ xcc_id);
for (k = 0; k < adev->usec_timeout; k++) {
- if (RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0)
+ if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
udelay(1);
}
if (k == adev->usec_timeout) {
- gfx_v9_4_3_select_se_sh(adev, 0xffffffff,
- 0xffffffff, 0xffffffff);
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
+ 0xffffffff,
+ 0xffffffff, xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
@@ -220,7 +1132,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -228,79 +1141,108 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
for (k = 0; k < adev->usec_timeout; k++) {
- if ((RREG32_SOC15(GC, 0, regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
+ if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
break;
udelay(1);
}
}
-static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
{
u32 tmp;
/* These interrupts should be enabled to drive DS clock */
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- if (adev->gfx.num_gfx_rings)
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
+}
+
+static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
+{
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
+ RLC_ENABLE_F32, 0);
+ gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
+ gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}
static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
- WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
- gfx_v9_4_3_enable_gui_idle_interrupt(adev, false);
- gfx_v9_4_3_wait_for_rlc_serdes(adev);
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_rlc_stop(adev, i);
}
-static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
+static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
- WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
+ SOFT_RESET_RLC, 1);
udelay(50);
- WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
+ SOFT_RESET_RLC, 0);
udelay(50);
}
-static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
+static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
-#ifdef AMDGPU_RLC_DEBUG_RETRY
- u32 rlc_ucode_ver;
-#endif
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_rlc_reset(adev, i);
+}
- WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
+{
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
+ RLC_ENABLE_F32, 1);
udelay(50);
/* carrizo do enable cp interrupt after cp inited */
if (!(adev->flags & AMD_IS_APU)) {
- gfx_v9_4_3_enable_gui_idle_interrupt(adev, true);
+ gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
udelay(50);
}
+}
+static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
+{
#ifdef AMDGPU_RLC_DEBUG_RETRY
- /* RLC_GPM_GENERAL_6 : RLC Ucode version */
- rlc_ucode_ver = RREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_6);
- if (rlc_ucode_ver == 0x108) {
- dev_info(adev->dev,
- "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
- rlc_ucode_ver, adev->gfx.rlc_fw_version);
- /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
- * default is 0x9C4 to create a 100us interval */
- WREG32_SOC15(GC, 0, regRLC_GPM_TIMER_INT_3, 0x9C4);
- /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
- * to disable the page fault retry interrupts, default is
- * 0x100 (256) */
- WREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_12, 0x100);
- }
+ u32 rlc_ucode_ver;
+#endif
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ gfx_v9_4_3_xcc_rlc_start(adev, i);
+#ifdef AMDGPU_RLC_DEBUG_RETRY
+ /* RLC_GPM_GENERAL_6 : RLC Ucode version */
+ rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
+ if (rlc_ucode_ver == 0x108) {
+ dev_info(adev->dev,
+ "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
+ rlc_ucode_ver, adev->gfx.rlc_fw_version);
+ /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
+ * default is 0x9C4 to create a 100us interval */
+ WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
+ /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
+ * to disable the page fault retry interrupts, default is
+ * 0x100 (256) */
+ WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
+ }
#endif
+ }
}
-static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev)
+static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
+ int xcc_id)
{
const struct rlc_firmware_header_v2_0 *hdr;
const __le32 *fw_data;
@@ -316,49 +1258,65 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
RLCG_UCODE_LOADING_START_ADDRESS);
for (i = 0; i < fw_size; i++) {
if (amdgpu_emu_mode == 1 && i % 100 == 0) {
dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
msleep(1);
}
- WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
}
- WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
return 0;
}
-static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
+static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
int r;
- adev->gfx.rlc.funcs->stop(adev);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
+ /* legacy rlc firmware loading */
+ r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
+ if (r)
+ return r;
+ gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
+ }
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
/* disable CG */
- WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
+ gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+
+ return 0;
+}
- /* TODO: revisit pg function */
- /* gfx_v9_4_3_init_pg(adev);*/
+static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
+{
+ int r, i, num_xcc;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- /* legacy rlc firmware loading */
- r = gfx_v9_4_3_rlc_load_microcode(adev);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
if (r)
return r;
}
- adev->gfx.rlc.funcs->start(adev);
-
return 0;
}
-static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
+ unsigned vmid)
{
u32 reg, data;
- reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
@@ -368,9 +1326,9 @@ static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
if (amdgpu_sriov_is_pp_one_vf(adev))
- WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
+ WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
else
- WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
}
static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
@@ -382,7 +1340,7 @@ static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
uint32_t offset,
struct soc15_reg_rlcg *entries, int arr_size)
{
- int i;
+ int i, inst;
uint32_t reg;
if (!entries)
@@ -392,7 +1350,12 @@ static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
const struct soc15_reg_rlcg *entry;
entry = &entries[i];
- reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ inst = adev->ip_map.logical_to_dev_inst ?
+ adev->ip_map.logical_to_dev_inst(
+ adev, entry->hwip, entry->instance) :
+ entry->instance;
+ reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
+ entry->reg;
if (offset == reg)
return true;
}
@@ -407,19 +1370,1042 @@ static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offs
ARRAY_SIZE(rlcg_access_gc_9_4_3));
}
-const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_4_3_select_se_sh,
- .read_wave_data = &gfx_v9_4_3_read_wave_data,
- .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
-};
+static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ if (enable) {
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
+ } else {
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
+ (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ adev->gfx.kiq[xcc_id].ring.sched.ready = false;
+ }
+ udelay(50);
+}
+
+static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ const struct gfx_firmware_header_v1_0 *mec_hdr;
+ const __le32 *fw_data;
+ unsigned i;
+ u32 tmp;
+ u32 mec_ucode_addr_offset;
+ u32 mec_ucode_data_offset;
+
+ if (!adev->gfx.mec_fw)
+ return -EINVAL;
+
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
+
+ mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
+
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
+ adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
+ upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
+
+ mec_ucode_addr_offset =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
+ mec_ucode_data_offset =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
+
+ /* MEC1 */
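+ /* Only the jump table is written through the UCODE_ADDR/DATA registers; the
+ * rest of the MEC ucode is expected to be fetched from the IC base programmed above. */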
+ WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
+ for (i = 0; i < mec_hdr->jt_size; i++)
+ WREG32(mec_ucode_data_offset,
+ le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
+
+ WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
+ /* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
+
+ return 0;
+}
+
+/* KIQ functions */
+static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
+{
+ uint32_t tmp;
+ struct amdgpu_device *adev = ring->adev;
+
+ /* tell RLC which is KIQ queue */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
+ tmp |= 0x80;
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
+}
+
+static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ }
+ }
+}
+
+static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
+ uint32_t tmp;
+
+ mqd->header = 0xC0310800;
+ mqd->compute_pipelinestat_enable = 0x00000001;
+ mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+ mqd->compute_misc_reserved = 0x00000003;
+
+ mqd->dynamic_cu_mask_addr_lo =
+ lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi =
+ upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+
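+ /* The EOP base address field is in 256-byte units. */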
+ eop_base_addr = ring->eop_gpu_addr >> 8;
+ mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
+ mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
+
+ mqd->cp_hqd_eop_control = tmp;
+
+ /* enable doorbell? */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
+
+ if (ring->use_doorbell) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_OFFSET, ring->doorbell_index);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_SOURCE, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_HIT, 0);
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 0);
+ }
+
+ mqd->cp_hqd_pq_doorbell_control = tmp;
+
+ /* disable the queue if it's active */
+ ring->wptr = 0;
+ mqd->cp_hqd_dequeue_request = 0;
+ mqd->cp_hqd_pq_rptr = 0;
+ mqd->cp_hqd_pq_wptr_lo = 0;
+ mqd->cp_hqd_pq_wptr_hi = 0;
+
+ /* set the pointer to the MQD */
+ mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
+ mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
+
+ /* set MQD vmid to 0 */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+ mqd->cp_mqd_control = tmp;
+
+ /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
+ hqd_gpu_addr = ring->gpu_addr >> 8;
+ mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+ mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+ (order_base_2(ring->ring_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ mqd->cp_hqd_pq_control = tmp;
+
+ /* set the wb address whether it's enabled or not */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_rptr_report_addr_hi =
+ upper_32_bits(wb_gpu_addr) & 0xffff;
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ ring->wptr = 0;
+ mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
+
+ /* set the vmid for the queue */
+ mqd->cp_hqd_vmid = 0;
+
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
+ mqd->cp_hqd_persistent_state = tmp;
+
+ /* set MIN_IB_AVAIL_SIZE */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
+ mqd->cp_hqd_ib_control = tmp;
+
+ /* set static priority for a queue/ring */
+ gfx_v9_4_3_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
+
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
+ int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ int j;
+
+ /* disable wptr polling */
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
+ mqd->cp_hqd_eop_base_addr_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
+ mqd->cp_hqd_eop_base_addr_hi);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
+ mqd->cp_hqd_eop_control);
+
+ /* enable doorbell? */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
+ mqd->cp_hqd_pq_doorbell_control);
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
+ mqd->cp_hqd_dequeue_request);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
+ mqd->cp_hqd_pq_rptr);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
+ mqd->cp_hqd_pq_wptr_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
+ mqd->cp_hqd_pq_wptr_hi);
+ }
+
+ /* set the pointer to the MQD */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
+ mqd->cp_mqd_base_addr_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
+ mqd->cp_mqd_base_addr_hi);
+
+ /* set MQD vmid to 0 */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
+ mqd->cp_mqd_control);
+
+ /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
+ mqd->cp_hqd_pq_base_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
+ mqd->cp_hqd_pq_base_hi);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
+ mqd->cp_hqd_pq_control);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
+ mqd->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ mqd->cp_hqd_pq_rptr_report_addr_hi);
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
+ mqd->cp_hqd_pq_wptr_poll_addr_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ mqd->cp_hqd_pq_wptr_poll_addr_hi);
+
+ /* enable the doorbell if requested */
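+ /* The per-XCC doorbell window starts at kiq + xcc_id * xcc_doorbell_range;
+ * (index * 2) << 2 converts the 64-bit doorbell index to a byte offset. */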
+ if (ring->use_doorbell) {
+ WREG32_SOC15(
+ GC, GET_INST(GC, xcc_id),
+ regCP_MEC_DOORBELL_RANGE_LOWER,
+ ((adev->doorbell_index.kiq +
+ xcc_id * adev->doorbell_index.xcc_doorbell_range) *
+ 2) << 2);
+ WREG32_SOC15(
+ GC, GET_INST(GC, xcc_id),
+ regCP_MEC_DOORBELL_RANGE_UPPER,
+ ((adev->doorbell_index.userqueue_end +
+ xcc_id * adev->doorbell_index.xcc_doorbell_range) *
+ 2) << 2);
+ }
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
+ mqd->cp_hqd_pq_doorbell_control);
+
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
+ mqd->cp_hqd_pq_wptr_lo);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
+ mqd->cp_hqd_pq_wptr_hi);
+
+ /* set the vmid for the queue */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
+ mqd->cp_hqd_persistent_state);
+
+ /* activate the queue */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
+ mqd->cp_hqd_active);
+
+ if (ring->use_doorbell)
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
+ int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int j;
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+
+ if (j == adev->usec_timeout) {
+ DRM_DEBUG("%s dequeue request failed.\n", ring->name);
+
+ /* Manual disable if dequeue request times out */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
+ }
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
+ 0);
+ }
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ struct v9_mqd *tmp_mqd;
+
+ gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
+
+ /* The GPU could be in a bad state during probe if the driver triggered a
+ * reset after loading the SMU; in that case the MQD has not been
+ * initialized and the driver needs to re-init it here.
+ * Check mqd->cp_hqd_pq_control since this value should not be 0
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
+ if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
+ /* for GPU_RESET case , reset MQD to a clean status */
+ if (adev->gfx.kiq[xcc_id].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
+
+ /* reset ring buffer */
+ ring->wptr = 0;
+ amdgpu_ring_clear_ring(ring);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
+ gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ } else {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+ mutex_lock(&adev->srbm_mutex);
+ if (amdgpu_sriov_vf(adev) && adev->in_suspend)
+ amdgpu_ring_clear_ring(ring);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
+ gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
+ gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.kiq[xcc_id].mqd_backup)
+ memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.compute_ring[0];
+ struct v9_mqd *tmp_mqd;
+
+ /* Same as the KIQ init above: the driver needs to re-init the MQD if
+ * mqd->cp_hqd_pq_control has not been initialized before
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+
+ if (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
+ gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ } else {
+ /* restore MQD to a clean status */
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
+ /* reset ring buffer */
+ ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
+ amdgpu_ring_clear_ring(ring);
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring;
+ int j;
+
+ for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+ ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me,
+ ring->pipe,
+ ring->queue, 0, GET_INST(GC, xcc_id));
+ gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ }
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ ring = &adev->gfx.kiq[xcc_id].ring;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ return r;
+ }
+
+ gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ return 0;
+}
+
+static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring = NULL;
+ int r = 0, i;
+
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ goto done;
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r)
+ goto done;
+ }
+
+ r = amdgpu_gfx_enable_kcq(adev, xcc_id);
+done:
+ return r;
+}
+
+static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring;
+ int r, j;
+
+ gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
+
+ r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
+ if (r)
+ return r;
+ }
+
+ r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
+ if (r)
+ return r;
+
+ r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
+ if (r)
+ return r;
+
+ for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+ ring = &adev->gfx.compute_ring
+ [j + xcc_id * adev->gfx.num_compute_rings];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
+{
+ int r = 0, i, num_xcc;
+
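+ /* If the current compute partition mode is unknown, switch to the
+ * user-requested mode before bringing up the CP. */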
+ if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ AMDGPU_XCP_FL_NONE) ==
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
+ amdgpu_user_partt_mode);
+
+ if (r)
+ return r;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ r = gfx_v9_4_3_xcc_cp_resume(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
+ int xcc_id)
+{
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
+}
+
+static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
+{
+ if (amdgpu_gfx_disable_kcq(adev, xcc_id))
+ DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* Polling must be disabled for SRIOV once the hw has finished;
+ * otherwise the CPC engine may keep fetching the WB address, which is
+ * already invalid after sw fini, and trigger a DMAR read error on the
+ * hypervisor side.
+ */
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
+ return;
+ }
+
+ /* Use the deinitialize sequence from CAIL when unbinding the device
+ * from the driver, otherwise the KIQ hangs when the device is bound back
+ */
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
+ adev->gfx.kiq[xcc_id].ring.pipe,
+ adev->gfx.kiq[xcc_id].ring.queue, 0,
+ GET_INST(GC, xcc_id));
+ gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
+ xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
+ gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
+}
+
+static int gfx_v9_4_3_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!amdgpu_sriov_vf(adev))
+ gfx_v9_4_3_init_golden_registers(adev);
+
+ gfx_v9_4_3_constants_init(adev);
+
+ r = adev->gfx.rlc.funcs->resume(adev);
+ if (r)
+ return r;
+
+ r = gfx_v9_4_3_cp_resume(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gfx_v9_4_3_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, num_xcc;
+
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_fini(adev, i);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_suspend(void *handle)
+{
+ return gfx_v9_4_3_hw_fini(handle);
+}
+
+static int gfx_v9_4_3_resume(void *handle)
+{
+ return gfx_v9_4_3_hw_init(handle);
+}
+
+static bool gfx_v9_4_3_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
+ GRBM_STATUS, GUI_ACTIVE))
+ return false;
+ }
+ return true;
+}
+
+static int gfx_v9_4_3_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v9_4_3_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gfx_v9_4_3_soft_reset(void *handle)
+{
+ u32 grbm_soft_reset = 0;
+ u32 tmp;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* GRBM_STATUS */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
+ if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+ GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+ GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+ GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+ GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
+ }
+
+ if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+ }
+
+ /* GRBM_STATUS2 */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+
+
+ if (grbm_soft_reset) {
+ /* stop the rlc */
+ adev->gfx.rlc.funcs->stop(adev);
+
+ /* Disable MEC parsing/prefetching */
+ gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+ return 0;
+}
+
+static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* GDS Base */
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
+ gds_base);
+
+ /* GDS Size */
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
+ gds_size);
+
+ /* GWS */
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
+ gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
-const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
+ /* OA */
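+ /* Build a contiguous allocation mask of oa_size bits starting at bit oa_base. */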
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
+ (1 << (oa_size + oa_base)) - (1 << oa_base));
+}
+
+static int gfx_v9_4_3_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ gfx_v9_4_3_set_kiq_pm4_funcs(adev);
+ gfx_v9_4_3_set_ring_funcs(adev);
+ gfx_v9_4_3_set_irq_funcs(adev);
+ gfx_v9_4_3_set_gds_init(adev);
+ gfx_v9_4_3_set_rlc_funcs(adev);
+
+ /* init rlcg reg access ctrl */
+ gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
+
+ return gfx_v9_4_3_init_microcode(adev);
+}
+
+static int gfx_v9_4_3_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+ if (r)
+ return r;
+
+ if (adev->gfx.ras &&
+ adev->gfx.ras->enable_watchdog_timer)
+ adev->gfx.ras->enable_watchdog_timer(adev);
+
+ return 0;
+}
+
+static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ uint32_t def, data;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
+ return;
+
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regRLC_CGTT_MGCG_OVERRIDE);
+
+ if (enable)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
+ else
+ data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
+
+ if (enable)
+ data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
+ else
+ data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
+}
+
+static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ uint32_t def, data;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
+ return;
+
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regRLC_CGTT_MGCG_OVERRIDE);
+
+ if (enable)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
+ else
+ data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regRLC_CGTT_MGCG_OVERRIDE, data);
+}
+
+static void
+gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ uint32_t data, def;
+
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 1 - RLC_CGTT_MGCG_OVERRIDE */
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
+
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ /* MGLS is a global flag to control all MGLS in GFX */
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ /* 2 - RLC memory Light sleep */
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
+ data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
+ }
+ /* 3 - CP memory Light sleep */
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
+ data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
+ }
+ }
+ } else {
+ /* 1 - MGCG_OVERRIDE */
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
+
+ data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ /* 2 - disable MGLS in RLC */
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
+ data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
+ }
+
+ /* 3 - disable MGLS in CP */
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
+ }
+ }
+
+}
+
+static void
+gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ uint32_t def, data;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
+ /* unset CGCG override */
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
+ else
+ data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
+ /* update CGCG and CGLS override bits */
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ /* enable cgcg FSM(0x0000363F) */
+ def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
+
+ data = (0x36
+ << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
+
+ /* set IDLE_POLL_COUNT(0x00900100) */
+ def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
+ data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
+ (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
+ } else {
+ def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
+ /* reset CGCG/CGLS bits */
+ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ /* disable cgcg and cgls in FSM */
+ if (def != data)
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
+ }
+
+}
+
+static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+
+ if (enable) {
+ /* FGCG */
+ gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
+ gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
+
+ /* CGCG/CGLS should be enabled after MGCG/MGLS
+ * === MGCG + MGLS ===
+ */
+ gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
+ xcc_id);
+ /* === CGCG + CGLS === */
+ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
+ xcc_id);
+ } else {
+ /* CGCG/CGLS should be disabled before MGCG/MGLS
+ * === CGCG + CGLS ===
+ */
+ gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
+ xcc_id);
+ /* === MGCG + MGLS === */
+ gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
+ xcc_id);
+
+ /* FGCG */
+ gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
+ gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+
+ return 0;
+}
+
+static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
- .set_safe_mode = gfx_v9_4_3_set_safe_mode,
- .unset_safe_mode = gfx_v9_4_3_unset_safe_mode,
+ .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
+ .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
.init = gfx_v9_4_3_rlc_init,
.resume = gfx_v9_4_3_rlc_resume,
.stop = gfx_v9_4_3_rlc_stop,
@@ -428,3 +2414,2016 @@ const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};
+
+static int gfx_v9_4_3_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static int gfx_v9_4_3_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, num_xcc;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ for (i = 0; i < num_xcc; i++)
+ gfx_v9_4_3_xcc_update_gfx_clock_gating(
+ adev, state == AMD_CG_STATE_GATE, i);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_GFX_MGCG */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
+ if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
+ *flags |= AMD_CG_SUPPORT_GFX_MGCG;
+
+ /* AMD_CG_SUPPORT_GFX_CGCG */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
+ if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CGCG;
+
+ /* AMD_CG_SUPPORT_GFX_CGLS */
+ if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CGLS;
+
+ /* AMD_CG_SUPPORT_GFX_RLC_LS */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
+ if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
+
+ /* AMD_CG_SUPPORT_GFX_CP_LS */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
+}
+
+static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask, reg_mem_engine;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ switch (ring->me) {
+ case 1:
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+ break;
+ case 2:
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ reg_mem_engine = 0;
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
+ reg_mem_engine = 1; /* pfp */
+ }
+
+ gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ ref_and_mask, ref_and_mask, 0x20);
+}
+
+static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+
+ /* Currently, there is a high possibility to get wave ID mismatch
+ * between ME and GDS, leading to a hw deadlock, because ME generates
+ * different wave IDs than the GDS expects. This situation happens
+ * randomly when at least 5 compute pipes use GDS ordered append.
+ * The wave IDs generated by ME are also wrong after suspend/resume.
+ * Those are probably bugs somewhere else in the kernel driver.
+ *
+ * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
+ * GDS to 0 for this ring (me/pipe).
+ */
+ if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
+ amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, control);
+}
+
+static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+ /* RELEASE_MEM - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
+ amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
+ EOP_TC_NC_ACTION_EN) :
+ (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
+ EOP_TC_MD_ACTION_EN)) |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+
+ /*
+	 * the address should be Qword aligned for a 64bit write, and Dword
+	 * aligned when only the low 32 bits are written (data high is discarded)
+ */
+ if (write64bit)
+ BUG_ON(addr & 0x7);
+ else
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ amdgpu_ring_write(ring, 0);
+}
+
+static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ seq, 0xffffffff, 4);
+}
+
+static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
+{
+	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware uses a 32bit rptr */
+}
+
+static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
+{
+ u64 wptr;
+
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell)
+ wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
+ else
+ BUG();
+ return wptr;
+}
+
+static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
+ } else {
+ BUG(); /* only DOORBELL method supported on gfx9 now */
+ }
+}
+
+static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned int flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* we only allocate 32bit for each seq wb address */
+ BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ /* write fence seq to the "addr" */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ if (flags & AMDGPU_FENCE_FLAG_INT) {
+ /* set register to trigger INT */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
+ amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
+ }
+}
+
+static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+	amdgpu_ring_write(ring, 0 |	/* src: register */
+ (5 << 8) | /* dst: memory */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ reg_val_offs * 4));
+}
+
+static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
+{
+ uint32_t cmd = 0;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+ break;
+ case AMDGPU_RING_TYPE_KIQ:
+ cmd = (1 << 16); /* no inc addr */
+ break;
+ default:
+ cmd = WR_CONFIRM;
+ break;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, cmd);
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
+
+static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
+static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+ ref, mask);
+}
+
+static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ struct amdgpu_device *adev, int me, int pipe,
+ enum amdgpu_interrupt_state state, int xcc_id)
+{
+ u32 mec_int_cntl, mec_int_cntl_reg;
+
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+
+ if (me == 1) {
+ switch (pipe) {
+ case 0:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
+ break;
+ case 1:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
+ break;
+ case 2:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
+ break;
+ case 3:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+ } else {
+ DRM_DEBUG("invalid me %d\n", me);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ TIME_STAMP_INT_ENABLE, 0);
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ TIME_STAMP_INT_ENABLE, 1);
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < num_xcc; i++)
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < num_xcc; i++)
+ WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ switch (type) {
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 1, 0, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 1, 1, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 1, 2, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 1, 3, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 2, 0, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 2, 1, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 2, 2, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
+ gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
+ adev, 2, 3, state, i);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int i, xcc_id;
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+
+ DRM_DEBUG("IH: CP EOP\n");
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
+
+ if (xcc_id == -EINVAL)
+ return -EINVAL;
+
+ switch (me_id) {
+ case 0:
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring
+ [i +
+ xcc_id * adev->gfx.num_compute_rings];
+			/* Per-queue interrupt is supported for MEC starting from VI.
+			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
+			 */
+
+ if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
+ amdgpu_fence_process(ring);
+ }
+ break;
+ }
+ return 0;
+}
+
+static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i, xcc_id;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
+
+ if (xcc_id == -EINVAL)
+ return;
+
+ switch (me_id) {
+ case 0:
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring
+ [i +
+ xcc_id * adev->gfx.num_compute_rings];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
+static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal register access in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
+static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
+static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int cp_coher_cntl =
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
+static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+ val = enable ? 0x1 : 0x7f;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
+ break;
+ case 1:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
+ break;
+ case 2:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
+ break;
+ case 3:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
+	 * the number of gfx waves. Setting 5 bits (0x1f) makes sure gfx only gets
+	 * around 25% of the gpu resources.
+ */
+ val = enable ? 0x1f : 0x07ffffff;
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
+ val);
+
+ /* Restrict waves for normal/low priority compute queues as well
+ * to get best QoS for high priority compute jobs.
+ *
+	 * amdgpu controls only the first ME (CS pipes 0-3).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
+	}
+}
+
+enum amdgpu_gfx_cp_ras_mem_id {
+ AMDGPU_GFX_CP_MEM1 = 1,
+ AMDGPU_GFX_CP_MEM2,
+ AMDGPU_GFX_CP_MEM3,
+ AMDGPU_GFX_CP_MEM4,
+ AMDGPU_GFX_CP_MEM5,
+};
+
+enum amdgpu_gfx_gcea_ras_mem_id {
+ AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
+ AMDGPU_GFX_GCEA_IORD_CMDMEM,
+ AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
+ AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
+ AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
+ AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
+ AMDGPU_GFX_GCEA_MAM_DMEM0,
+ AMDGPU_GFX_GCEA_MAM_DMEM1,
+ AMDGPU_GFX_GCEA_MAM_DMEM2,
+ AMDGPU_GFX_GCEA_MAM_DMEM3,
+ AMDGPU_GFX_GCEA_MAM_AMEM0,
+ AMDGPU_GFX_GCEA_MAM_AMEM1,
+ AMDGPU_GFX_GCEA_MAM_AMEM2,
+ AMDGPU_GFX_GCEA_MAM_AMEM3,
+ AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
+ AMDGPU_GFX_GCEA_WRET_TAGMEM,
+ AMDGPU_GFX_GCEA_RRET_TAGMEM,
+ AMDGPU_GFX_GCEA_IOWR_DATAMEM,
+ AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
+ AMDGPU_GFX_GCEA_DRAM_DATAMEM,
+};
+
+enum amdgpu_gfx_gc_cane_ras_mem_id {
+ AMDGPU_GFX_GC_CANE_MEM0 = 0,
+};
+
+enum amdgpu_gfx_gcutcl2_ras_mem_id {
+ AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
+};
+
+enum amdgpu_gfx_gds_ras_mem_id {
+ AMDGPU_GFX_GDS_MEM0 = 0,
+};
+
+enum amdgpu_gfx_lds_ras_mem_id {
+ AMDGPU_GFX_LDS_BANK0 = 0,
+ AMDGPU_GFX_LDS_BANK1,
+ AMDGPU_GFX_LDS_BANK2,
+ AMDGPU_GFX_LDS_BANK3,
+ AMDGPU_GFX_LDS_BANK4,
+ AMDGPU_GFX_LDS_BANK5,
+ AMDGPU_GFX_LDS_BANK6,
+ AMDGPU_GFX_LDS_BANK7,
+ AMDGPU_GFX_LDS_BANK8,
+ AMDGPU_GFX_LDS_BANK9,
+ AMDGPU_GFX_LDS_BANK10,
+ AMDGPU_GFX_LDS_BANK11,
+ AMDGPU_GFX_LDS_BANK12,
+ AMDGPU_GFX_LDS_BANK13,
+ AMDGPU_GFX_LDS_BANK14,
+ AMDGPU_GFX_LDS_BANK15,
+ AMDGPU_GFX_LDS_BANK16,
+ AMDGPU_GFX_LDS_BANK17,
+ AMDGPU_GFX_LDS_BANK18,
+ AMDGPU_GFX_LDS_BANK19,
+ AMDGPU_GFX_LDS_BANK20,
+ AMDGPU_GFX_LDS_BANK21,
+ AMDGPU_GFX_LDS_BANK22,
+ AMDGPU_GFX_LDS_BANK23,
+ AMDGPU_GFX_LDS_BANK24,
+ AMDGPU_GFX_LDS_BANK25,
+ AMDGPU_GFX_LDS_BANK26,
+ AMDGPU_GFX_LDS_BANK27,
+ AMDGPU_GFX_LDS_BANK28,
+ AMDGPU_GFX_LDS_BANK29,
+ AMDGPU_GFX_LDS_BANK30,
+ AMDGPU_GFX_LDS_BANK31,
+ AMDGPU_GFX_LDS_SP_BUFFER_A,
+ AMDGPU_GFX_LDS_SP_BUFFER_B,
+};
+
+enum amdgpu_gfx_rlc_ras_mem_id {
+ AMDGPU_GFX_RLC_GPMF32 = 1,
+ AMDGPU_GFX_RLC_RLCVF32,
+ AMDGPU_GFX_RLC_SCRATCH,
+ AMDGPU_GFX_RLC_SRM_ARAM,
+ AMDGPU_GFX_RLC_SRM_DRAM,
+ AMDGPU_GFX_RLC_TCTAG,
+ AMDGPU_GFX_RLC_SPM_SE,
+ AMDGPU_GFX_RLC_SPM_GRBMT,
+};
+
+enum amdgpu_gfx_sp_ras_mem_id {
+ AMDGPU_GFX_SP_SIMDID0 = 0,
+};
+
+enum amdgpu_gfx_spi_ras_mem_id {
+ AMDGPU_GFX_SPI_MEM0 = 0,
+ AMDGPU_GFX_SPI_MEM1,
+ AMDGPU_GFX_SPI_MEM2,
+ AMDGPU_GFX_SPI_MEM3,
+};
+
+enum amdgpu_gfx_sqc_ras_mem_id {
+ AMDGPU_GFX_SQC_INST_CACHE_A = 100,
+ AMDGPU_GFX_SQC_INST_CACHE_B = 101,
+ AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
+ AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
+ AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
+ AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
+ AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
+ AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
+ AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
+ AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
+ AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
+ AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
+ AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
+ AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
+ AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
+ AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
+ AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
+ AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
+ AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
+ AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
+ AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
+ AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
+ AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
+};
+
+enum amdgpu_gfx_sq_ras_mem_id {
+ AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
+ AMDGPU_GFX_SQ_SGPR_MEM1,
+ AMDGPU_GFX_SQ_SGPR_MEM2,
+ AMDGPU_GFX_SQ_SGPR_MEM3,
+};
+
+enum amdgpu_gfx_ta_ras_mem_id {
+ AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
+ AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
+ AMDGPU_GFX_TA_FS_CFIFO_RAM,
+ AMDGPU_GFX_TA_FSX_LFIFO,
+ AMDGPU_GFX_TA_FS_DFIFO_RAM,
+};
+
+enum amdgpu_gfx_tcc_ras_mem_id {
+ AMDGPU_GFX_TCC_MEM1 = 1,
+};
+
+enum amdgpu_gfx_tca_ras_mem_id {
+ AMDGPU_GFX_TCA_MEM1 = 1,
+};
+
+enum amdgpu_gfx_tci_ras_mem_id {
+ AMDGPU_GFX_TCIW_MEM = 1,
+};
+
+enum amdgpu_gfx_tcp_ras_mem_id {
+ AMDGPU_GFX_TCP_LFIFO0 = 1,
+ AMDGPU_GFX_TCP_SET0BANK0_RAM,
+ AMDGPU_GFX_TCP_SET0BANK1_RAM,
+ AMDGPU_GFX_TCP_SET0BANK2_RAM,
+ AMDGPU_GFX_TCP_SET0BANK3_RAM,
+ AMDGPU_GFX_TCP_SET1BANK0_RAM,
+ AMDGPU_GFX_TCP_SET1BANK1_RAM,
+ AMDGPU_GFX_TCP_SET1BANK2_RAM,
+ AMDGPU_GFX_TCP_SET1BANK3_RAM,
+ AMDGPU_GFX_TCP_SET2BANK0_RAM,
+ AMDGPU_GFX_TCP_SET2BANK1_RAM,
+ AMDGPU_GFX_TCP_SET2BANK2_RAM,
+ AMDGPU_GFX_TCP_SET2BANK3_RAM,
+ AMDGPU_GFX_TCP_SET3BANK0_RAM,
+ AMDGPU_GFX_TCP_SET3BANK1_RAM,
+ AMDGPU_GFX_TCP_SET3BANK2_RAM,
+ AMDGPU_GFX_TCP_SET3BANK3_RAM,
+ AMDGPU_GFX_TCP_VM_FIFO,
+ AMDGPU_GFX_TCP_DB_TAGRAM0,
+ AMDGPU_GFX_TCP_DB_TAGRAM1,
+ AMDGPU_GFX_TCP_DB_TAGRAM2,
+ AMDGPU_GFX_TCP_DB_TAGRAM3,
+ AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
+ AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
+ AMDGPU_GFX_TCP_CMD_FIFO,
+};
+
+enum amdgpu_gfx_td_ras_mem_id {
+ AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
+ AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
+ AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
+};
+
+enum amdgpu_gfx_tcx_ras_mem_id {
+ AMDGPU_GFX_TCX_FIFOD0 = 0,
+ AMDGPU_GFX_TCX_FIFOD1,
+ AMDGPU_GFX_TCX_FIFOD2,
+ AMDGPU_GFX_TCX_FIFOD3,
+ AMDGPU_GFX_TCX_FIFOD4,
+ AMDGPU_GFX_TCX_FIFOD5,
+ AMDGPU_GFX_TCX_FIFOD6,
+ AMDGPU_GFX_TCX_FIFOD7,
+ AMDGPU_GFX_TCX_FIFOB0,
+ AMDGPU_GFX_TCX_FIFOB1,
+ AMDGPU_GFX_TCX_FIFOB2,
+ AMDGPU_GFX_TCX_FIFOB3,
+ AMDGPU_GFX_TCX_FIFOB4,
+ AMDGPU_GFX_TCX_FIFOB5,
+ AMDGPU_GFX_TCX_FIFOB6,
+ AMDGPU_GFX_TCX_FIFOB7,
+ AMDGPU_GFX_TCX_FIFOA0,
+ AMDGPU_GFX_TCX_FIFOA1,
+ AMDGPU_GFX_TCX_FIFOA2,
+ AMDGPU_GFX_TCX_FIFOA3,
+ AMDGPU_GFX_TCX_FIFOA4,
+ AMDGPU_GFX_TCX_FIFOA5,
+ AMDGPU_GFX_TCX_FIFOA6,
+ AMDGPU_GFX_TCX_FIFOA7,
+ AMDGPU_GFX_TCX_CFIFO0,
+ AMDGPU_GFX_TCX_CFIFO1,
+ AMDGPU_GFX_TCX_CFIFO2,
+ AMDGPU_GFX_TCX_CFIFO3,
+ AMDGPU_GFX_TCX_CFIFO4,
+ AMDGPU_GFX_TCX_CFIFO5,
+ AMDGPU_GFX_TCX_CFIFO6,
+ AMDGPU_GFX_TCX_CFIFO7,
+ AMDGPU_GFX_TCX_FIFO_ACKB0,
+ AMDGPU_GFX_TCX_FIFO_ACKB1,
+ AMDGPU_GFX_TCX_FIFO_ACKB2,
+ AMDGPU_GFX_TCX_FIFO_ACKB3,
+ AMDGPU_GFX_TCX_FIFO_ACKB4,
+ AMDGPU_GFX_TCX_FIFO_ACKB5,
+ AMDGPU_GFX_TCX_FIFO_ACKB6,
+ AMDGPU_GFX_TCX_FIFO_ACKB7,
+ AMDGPU_GFX_TCX_FIFO_ACKD0,
+ AMDGPU_GFX_TCX_FIFO_ACKD1,
+ AMDGPU_GFX_TCX_FIFO_ACKD2,
+ AMDGPU_GFX_TCX_FIFO_ACKD3,
+ AMDGPU_GFX_TCX_FIFO_ACKD4,
+ AMDGPU_GFX_TCX_FIFO_ACKD5,
+ AMDGPU_GFX_TCX_FIFO_ACKD6,
+ AMDGPU_GFX_TCX_FIFO_ACKD7,
+ AMDGPU_GFX_TCX_DST_FIFOA0,
+ AMDGPU_GFX_TCX_DST_FIFOA1,
+ AMDGPU_GFX_TCX_DST_FIFOA2,
+ AMDGPU_GFX_TCX_DST_FIFOA3,
+ AMDGPU_GFX_TCX_DST_FIFOA4,
+ AMDGPU_GFX_TCX_DST_FIFOA5,
+ AMDGPU_GFX_TCX_DST_FIFOA6,
+ AMDGPU_GFX_TCX_DST_FIFOA7,
+ AMDGPU_GFX_TCX_DST_FIFOB0,
+ AMDGPU_GFX_TCX_DST_FIFOB1,
+ AMDGPU_GFX_TCX_DST_FIFOB2,
+ AMDGPU_GFX_TCX_DST_FIFOB3,
+ AMDGPU_GFX_TCX_DST_FIFOB4,
+ AMDGPU_GFX_TCX_DST_FIFOB5,
+ AMDGPU_GFX_TCX_DST_FIFOB6,
+ AMDGPU_GFX_TCX_DST_FIFOB7,
+ AMDGPU_GFX_TCX_DST_FIFOD0,
+ AMDGPU_GFX_TCX_DST_FIFOD1,
+ AMDGPU_GFX_TCX_DST_FIFOD2,
+ AMDGPU_GFX_TCX_DST_FIFOD3,
+ AMDGPU_GFX_TCX_DST_FIFOD4,
+ AMDGPU_GFX_TCX_DST_FIFOD5,
+ AMDGPU_GFX_TCX_DST_FIFOD6,
+ AMDGPU_GFX_TCX_DST_FIFOD7,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
+ AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
+};
+
+enum amdgpu_gfx_atc_l2_ras_mem_id {
+ AMDGPU_GFX_ATC_L2_MEM0 = 0,
+};
+
+enum amdgpu_gfx_utcl2_ras_mem_id {
+ AMDGPU_GFX_UTCL2_MEM0 = 0,
+};
+
+enum amdgpu_gfx_vml2_ras_mem_id {
+ AMDGPU_GFX_VML2_MEM0 = 0,
+};
+
+enum amdgpu_gfx_vml2_walker_ras_mem_id {
+ AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
+ {AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
+ {AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
+ {AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
+ {AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
+ {AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
+ {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
+ {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
+ {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
+ {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
+ {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
+ {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
+ {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
+ {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
+ {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
+ {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
+ {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
+ {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
+ {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
+ {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
+ {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
+ {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
+ {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
+ {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
+ {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
+ {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
+ {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
+ {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
+ {AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
+ {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
+ {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
+ {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
+ {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
+ {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
+ {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
+ {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
+ {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
+ {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
+ {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
+ {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
+ {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
+ {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
+ {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
+ {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
+ {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
+ {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
+ {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
+ {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
+ {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
+ {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
+ {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
+ {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
+ {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
+ {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
+ {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
+ {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
+ {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
+ {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
+ {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
+ {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
+ {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
+ {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
+ {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
+ {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
+ {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
+ {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
+ {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
+ {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
+ {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
+ {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
+ {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
+ {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
+ {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
+ {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
+ {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
+ {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
+ {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
+ {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
+ {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
+ {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
+ {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
+ {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
+ {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
+ {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
+ {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
+ {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
+ {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
+ {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
+ {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
+ {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
+ {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
+ {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
+ {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
+ {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
+ {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
+ {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
+ {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
+ {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
+ {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
+ {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
+ {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
+ {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
+ {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
+ {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
+ {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
+ {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
+ {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
+ {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
+ {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
+ {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
+ {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
+ {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
+ {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
+ {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
+ {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
+ {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
+ {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
+ {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
+ {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
+ {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
+ {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
+ {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
+ {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
+ {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
+ {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
+ {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
+ {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
+ {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
+ {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
+ {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
+ {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
+ {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
+ {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
+ {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
+ {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
+ {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
+ {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
+ {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
+ {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
+ {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
+ {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
+ {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
+ {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
+ {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
+ {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
+ {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
+ {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
+ {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
+ {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
+ {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
+ {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
+ {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
+ {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
+ {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
+ {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
+ {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
+ {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
+ {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
+ {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
+ {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
+ {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
+ {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
+ {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
+ {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
+ {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
+ {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
+ {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
+ {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
+ {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
+ {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
+ {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
+ {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
+ {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
+ {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
+ {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
+ {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
+ {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
+ {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
+ {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
+ {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
+ {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
+ {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
+ {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
+ {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
+ {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
+ {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
+ {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
+ {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
+ {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
+ {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
+ {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
+ {AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
+ {AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
+ {AMDGPU_GFX_VML2_MEM, "VML2_MEM"},
+};
+
+static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
+ {AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"},
+};
+
+static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
+ AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
+};
+
+static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
+ AMDGPU_GFX_RLC_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
+ AMDGPU_GFX_CP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
+ AMDGPU_GFX_CP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
+ AMDGPU_GFX_CP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
+ AMDGPU_GFX_GDS_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
+ AMDGPU_GFX_GC_CANE_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
+ AMDGPU_GFX_SPI_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
+ AMDGPU_GFX_SP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
+ AMDGPU_GFX_SP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
+ AMDGPU_GFX_SQ_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
+ 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
+ AMDGPU_GFX_SQC_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
+ 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
+ AMDGPU_GFX_TCX_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
+ 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
+ AMDGPU_GFX_TCC_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
+ AMDGPU_GFX_TA_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
+ 31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
+ AMDGPU_GFX_TCI_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
+ AMDGPU_GFX_TCP_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
+ AMDGPU_GFX_TD_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
+ 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
+ AMDGPU_GFX_GCEA_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
+ AMDGPU_GFX_LDS_MEM, 1},
+};
+
+static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
+ AMDGPU_GFX_RLC_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
+ AMDGPU_GFX_CP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
+ AMDGPU_GFX_CP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
+ AMDGPU_GFX_CP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
+ AMDGPU_GFX_GDS_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
+ AMDGPU_GFX_GC_CANE_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
+ AMDGPU_GFX_SPI_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
+ AMDGPU_GFX_SP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
+ AMDGPU_GFX_SP_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
+ AMDGPU_GFX_SQ_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
+ 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
+ AMDGPU_GFX_SQC_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
+ 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
+ AMDGPU_GFX_TCX_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
+ 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
+ AMDGPU_GFX_TCC_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
+ AMDGPU_GFX_TA_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
+ 31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
+ AMDGPU_GFX_TCI_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
+ AMDGPU_GFX_TCP_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
+ AMDGPU_GFX_TD_MEM, 8},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
+ 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
+ AMDGPU_GFX_TCA_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
+ 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
+ AMDGPU_GFX_GCEA_MEM, 1},
+ {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
+ 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
+ AMDGPU_GFX_LDS_MEM, 1},
+};
+
+static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = {
+ SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16
+};
+
+static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
+ void *ras_error_status, int xcc_id)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ unsigned long ce_count = 0, ue_count = 0;
+ uint32_t i, j, k;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
+ for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
+ /* no need to select if instance number is 1 */
+ if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
+ gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
+ gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
+
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ &(gfx_v9_4_3_ce_reg_list[i].reg_entry),
+ 1,
+ gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
+ gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
+ GET_INST(GC, xcc_id),
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
+ &ce_count);
+
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
+ 1,
+ gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
+ gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
+ GET_INST(GC, xcc_id),
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ &ue_count);
+ }
+ }
+ }
+
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+	/* the caller should make sure that err_data->ue_count
+	 * and err_data->ce_count are initialized
+ */
+ err_data->ce_count += ce_count;
+ err_data->ue_count += ue_count;
+}
+
+static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
+ void *ras_error_status, int xcc_id)
+{
+ uint32_t i, j, k;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
+ for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
+ /* no need to select if instance number is 1 */
+ if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
+ gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
+ gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
+
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ &(gfx_v9_4_3_ce_reg_list[i].reg_entry),
+ 1,
+ GET_INST(GC, xcc_id));
+
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
+ 1,
+ GET_INST(GC, xcc_id));
+ }
+ }
+ }
+
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ uint32_t i, j;
+ uint32_t reg_value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
+ reg_value = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regGCEA_ERR_STATUS);
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+ dev_warn(adev->dev,
+ "GCEA err detected at instance: %d, status: 0x%x!\n",
+ j, reg_value);
+ }
+ /* clear after read */
+ reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
+ CLEAR_ERROR_STATUS, 0x1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS,
+ reg_value);
+ }
+ }
+
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
+ }
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
+ }
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regVML2_WALKER_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS,
+ 0x3);
+ }
+}
+
+static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev,
+ uint32_t status, int xcc_id)
+{
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ uint32_t i, simd, wave;
+ uint32_t wave_status;
+ uint32_t wave_pc_lo, wave_pc_hi;
+ uint32_t wave_exec_lo, wave_exec_hi;
+ uint32_t wave_inst_dw0, wave_inst_dw1;
+ uint32_t wave_ib_sts;
+
+ for (i = 0; i < 32; i++) {
+ if (!((i << 1) & status))
+ continue;
+
+ simd = i / cu_info->max_waves_per_simd;
+ wave = i % cu_info->max_waves_per_simd;
+
+ wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
+ wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
+ wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
+ wave_exec_lo =
+ wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
+ wave_exec_hi =
+ wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
+ wave_inst_dw0 =
+ wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
+ wave_inst_dw1 =
+ wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
+ wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
+
+ dev_info(
+ adev->dev,
+ "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
+ simd, wave, wave_status,
+ ((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
+ ((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
+ ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
+ wave_ib_sts);
+ }
+}
+
+static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ uint32_t se_idx, sh_idx, cu_idx;
+ uint32_t status;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
+ for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
+ for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
+ gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
+ cu_idx, xcc_id);
+ status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regSQ_TIMEOUT_STATUS);
+ if (status != 0) {
+ dev_info(
+ adev->dev,
+ "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
+ se_idx, sh_idx, cu_idx);
+ gfx_v9_4_3_log_cu_timeout_status(
+ adev, status, xcc_id);
+ }
+ /* clear old status */
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regSQ_TIMEOUT_STATUS, 0);
+ }
+ }
+ }
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev,
+ void *ras_error_status, int xcc_id)
+{
+ gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id);
+ gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id);
+ gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id);
+}
+
+static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3);
+}
+
+static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ uint32_t i, j;
+ uint32_t value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
+ value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS);
+ value = REG_SET_FIELD(value, GCEA_ERR_STATUS,
+ CLEAR_ERROR_STATUS, 0x1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, value);
+ }
+ }
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ uint32_t se_idx, sh_idx, cu_idx;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
+ for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
+ for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
+ gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
+ cu_idx, xcc_id);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id),
+ regSQ_TIMEOUT_STATUS, 0);
+ }
+ }
+ }
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
+ void *ras_error_status, int xcc_id)
+{
+ gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id);
+ gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id);
+ gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
+}
+
+static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
+ void *ras_error_status, int xcc_id)
+{
+ uint32_t i;
+ uint32_t data;
+
+ data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+ amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
+
+ if (amdgpu_watchdog_timer.timeout_fatal_disable &&
+ (amdgpu_watchdog_timer.period < 1 ||
+ amdgpu_watchdog_timer.period > 0x23)) {
+ dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
+ amdgpu_watchdog_timer.period = 0x23;
+ }
+ data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
+ amdgpu_watchdog_timer.period);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
+ }
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_gfx_ras_error_func(adev, ras_error_status,
+ gfx_v9_4_3_inst_query_ras_err_count);
+}
+
+static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
+}
+
+static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev)
+{
+ amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status);
+}
+
+static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
+{
+ amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
+}
+
+static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
+{
+ amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
+}
+
+static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
+ .name = "gfx_v9_4_3",
+ .early_init = gfx_v9_4_3_early_init,
+ .late_init = gfx_v9_4_3_late_init,
+ .sw_init = gfx_v9_4_3_sw_init,
+ .sw_fini = gfx_v9_4_3_sw_fini,
+ .hw_init = gfx_v9_4_3_hw_init,
+ .hw_fini = gfx_v9_4_3_hw_fini,
+ .suspend = gfx_v9_4_3_suspend,
+ .resume = gfx_v9_4_3_resume,
+ .is_idle = gfx_v9_4_3_is_idle,
+ .wait_for_idle = gfx_v9_4_3_wait_for_idle,
+ .soft_reset = gfx_v9_4_3_soft_reset,
+ .set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
+ .set_powergating_state = gfx_v9_4_3_set_powergating_state,
+ .get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = true,
+ .get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
+ .get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
+ .set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
+ .emit_frame_size =
+ 20 + /* gfx_v9_4_3_ring_emit_gds_switch */
+ 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
+ 5 + /* hdp invalidate */
+ 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_4_3_ring_emit_vm_flush */
+ 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
+ 7 + /* gfx_v9_4_3_emit_mem_sync */
+ 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
+ .emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
+ .emit_fence = gfx_v9_4_3_ring_emit_fence,
+ .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
+ .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
+ .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
+ .test_ring = gfx_v9_4_3_ring_test_ring,
+ .test_ib = gfx_v9_4_3_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_wreg = gfx_v9_4_3_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
+ .emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
+};
+
+static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
+ .type = AMDGPU_RING_TYPE_KIQ,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = true,
+ .get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
+ .get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
+ .set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
+ .emit_frame_size =
+ 20 + /* gfx_v9_4_3_ring_emit_gds_switch */
+ 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
+ 5 + /* hdp invalidate */
+ 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_4_3_ring_emit_vm_flush */
+ 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
+ .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
+ .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
+ .test_ring = gfx_v9_4_3_ring_test_ring,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_rreg = gfx_v9_4_3_ring_emit_rreg,
+ .emit_wreg = gfx_v9_4_3_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+};
+
+static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
+ adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
+
+ for (j = 0; j < adev->gfx.num_compute_rings; j++)
+ adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
+ = &gfx_v9_4_3_ring_funcs_compute;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
+ .set = gfx_v9_4_3_set_eop_interrupt_state,
+ .process = gfx_v9_4_3_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
+ .set = gfx_v9_4_3_set_priv_reg_fault_state,
+ .process = gfx_v9_4_3_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
+ .set = gfx_v9_4_3_set_priv_inst_fault_state,
+ .process = gfx_v9_4_3_priv_inst_irq,
+};
+
+static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+ adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
+
+ adev->gfx.priv_reg_irq.num_types = 1;
+ adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
+
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
+}
+
+static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
+}
+
+static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
+{
+ /* init asic gds info */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ /* 9.4.3 removed all the GDS internal memory;
+ * only GWS opcodes, such as barrier and semaphore,
+ * are supported in the kernel. */
+ adev->gds.gds_size = 0;
+ break;
+ default:
+ adev->gds.gds_size = 0x10000;
+ break;
+ }
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ /* deprecated for 9.4.3, no usage at all */
+ adev->gds.gds_compute_max_wave_id = 0;
+ break;
+ default:
+ /* this really depends on the chip */
+ adev->gds.gds_compute_max_wave_id = 0x7ff;
+ break;
+ }
+
+ adev->gds.gws_size = 64;
+ adev->gds.oa_size = 16;
+}
+
+static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
+static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+{
+ u32 data, mask;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
+
+ data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+
+ mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
+
+ return (~data) & mask;
+}
+
+static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ struct amdgpu_cu_info *cu_info)
+{
+ int i, j, k, counter, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ unsigned disable_masks[4 * 4];
+
+ if (!adev || !cu_info)
+ return -EINVAL;
+
+ /*
+ * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
+ */
+ if (adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_sh_per_se > 16)
+ return -EINVAL;
+
+ amdgpu_gfx_parse_disable_cu(disable_masks,
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
+ gfx_v9_4_3_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
+ bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
+
+ /*
+ * The bitmap (and ao_cu_bitmap) in the cu_info structure is
+ * a 4x4 array, which suits Vega ASICs with their 4*2 SE/SH
+ * layout. For Arcturus, the SE/SH layout changed to 8*1.
+ * To minimize the impact, we keep it compatible with the
+ * current bitmap array as below:
+ * SE4,SH0 --> bitmap[0][1]
+ * SE5,SH0 --> bitmap[1][1]
+ * SE6,SH0 --> bitmap[2][1]
+ * SE7,SH0 --> bitmap[3][1]
+ */
+ cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < adev->gfx.config.max_cu_per_sh)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+ active_cu_number += counter;
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
+ }
+ }
+ gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ cu_info->number = active_cu_number;
+ cu_info->ao_cu_mask = ao_cu_mask;
+ cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+
+ return 0;
+}
+
+const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 9,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gfx_v9_4_3_ip_funcs,
+};
+
+static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t tmp_mask;
+ int i, r;
+
+ /* TODO: Initialize golden regs */
+ /* gfx_v9_4_3_init_golden_registers(adev); */
+
+ tmp_mask = inst_mask;
+ for_each_inst(i, tmp_mask)
+ gfx_v9_4_3_xcc_constants_init(adev, i);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ tmp_mask = inst_mask;
+ for_each_inst(i, tmp_mask) {
+ r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
+ if (r)
+ return r;
+ }
+ }
+
+ tmp_mask = inst_mask;
+ for_each_inst(i, tmp_mask) {
+ r = gfx_v9_4_3_xcc_cp_resume(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for_each_inst(i, inst_mask)
+ gfx_v9_4_3_xcc_fini(adev, i);
+
+ return 0;
+}
+
+struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
+ .suspend = &gfx_v9_4_3_xcp_suspend,
+ .resume = &gfx_v9_4_3_xcp_resume
+};
+
+struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
+ .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_3_query_ras_error_status,
+ .reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status,
+};
+
+struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
+ .ras_block = {
+ .hw_ops = &gfx_v9_4_3_ras_ops,
+ },
+ .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
+};
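
The RAS wrappers earlier in this file (gfx_v9_4_3_query_ras_error_count() and friends) are thin adapters: each hands a per-XCC worker to amdgpu_gfx_ras_error_func(), which is expected to invoke it once per GC instance. That dispatcher is not part of this hunk (it presumably lives in amdgpu_gfx.c); a minimal sketch of the pattern, assuming for_each_inst() consumes the mask it iterates (which is why gfx_v9_4_3_xcp_resume() above re-copies inst_mask into tmp_mask before each loop), might look like:

	static void example_gfx_ras_error_func(struct amdgpu_device *adev,
			void *ras_error_status,
			void (*func)(struct amdgpu_device *adev,
				     void *ras_error_status, int xcc_id))
	{
		/* Walk a local copy so the device's xcc_mask is left untouched. */
		uint32_t xcc_mask = adev->gfx.xcc_mask;
		int i;

		for_each_inst(i, xcc_mask)
			func(adev, ras_error_status, i);
	}

The function name and body here are illustrative only; the callback signature is taken from the calls above, not from the real helper.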
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
index 84e69701b81a..42d67ee0e7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
@@ -24,7 +24,8 @@
#ifndef __GFX_V9_4_3_H__
#define __GFX_V9_4_3_H__
-extern const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs;
-extern const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs;
+extern const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block;
+
+extern struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs;
#endif /* __GFX_V9_4_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ab2325f6c7ac..cdc290a474a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -40,7 +40,7 @@ static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev,
uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -103,7 +103,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
+ /*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
* So here is the workaround that increase system
@@ -247,8 +247,8 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned num_level, block_size;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ unsigned int num_level, block_size;
uint32_t tmp;
int i;
@@ -307,8 +307,8 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -338,7 +338,7 @@ static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
@@ -375,6 +375,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
+
tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -411,7 +412,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
static void gfxhub_v1_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index c59c6c85fbff..0834af771549 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -21,6 +21,7 @@
*
*/
#include "amdgpu.h"
+#include "amdgpu_xcp.h"
#include "gfxhub_v1_2.h"
#include "gfxhub_v1_1.h"
@@ -35,227 +36,288 @@
static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
{
- return (u64)RREG32_SOC15(GC, 0, regMC_VM_FB_OFFSET) << 24;
+ return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24;
+}
+
+static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint64_t page_table_base,
+ uint32_t xcc_mask)
+{
+ struct amdgpu_vmhub *hub;
+ int i;
+
+ for_each_inst(i, xcc_mask) {
+ hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+ }
}
static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
-
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- hub->ctx_addr_distance * vmid,
- lower_32_bits(page_table_base));
+ uint32_t xcc_mask;
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- hub->ctx_addr_distance * vmid,
- upper_32_bits(page_table_base));
+ xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
+ gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask);
}
-static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
uint64_t pt_base;
+ int i;
if (adev->gmc.pdb0_bo)
pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
else
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- gfxhub_v1_2_setup_vm_pt_regs(adev, 0, pt_base);
+ gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask);
/* If use GART for FB translation, vmid0 page table covers both
* vram and system memory (gart)
*/
- if (adev->gmc.pdb0_bo) {
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
-
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
- } else {
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.gart_start >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.gart_start >> 44));
-
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
+ for_each_inst(i, xcc_mask) {
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ } else {
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
}
}
-static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev)
+static void
+gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
uint64_t value;
uint32_t tmp;
+ int i;
- /* Program the AGP BAR */
- WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BASE, 0);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
- if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_RLC(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the
- * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
- * So here is the workaround that increase system
- * aperture high address (add 1) to get rid of the VM
- * fault and hardware hang.
- */
- WREG32_SOC15_RLC(GC, 0,
- regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.fb_end >> 18) + 0x1,
- adev->gmc.agp_end >> 18));
- else
- WREG32_SOC15_RLC(GC, 0,
- regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
-
- /* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
-
- /* Program "protection fault". */
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page_addr >> 44));
-
- tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
- }
-
- /* In the case squeezing vram into GART aperture, we don't use
- * FB aperture and AGP aperture. Disable them.
- */
- if (adev->gmc.pdb0_bo) {
- WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_TOP, 0);
- WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
- WREG32_SOC15(GC, 0, regMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ for_each_inst(i, xcc_mask) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the
+ * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+ * So here is the workaround that increase system
+ * aperture high address (add 1) to get rid of the VM
+ * fault and hardware hang.
+ */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i),
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max((adev->gmc.fb_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
+ else
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i),
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+ }
+
+ /* In the case squeezing vram into GART aperture, we don't use
+ * FB aperture and AGP aperture. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
}
}
-static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
uint32_t tmp;
+ int i;
- /* Setup TLB control */
- tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
-
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 1);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- MTYPE, MTYPE_UC);/* XXX for emulation. */
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
-
- WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ for_each_inst(i, xcc_mask) {
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
-static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
uint32_t tmp;
+ int i;
- /* Setup L2 cache */
- tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
- /* XXX for emulation, Refer to closed source code.*/
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
- 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL, tmp);
-
- tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL2);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL2, tmp);
-
- tmp = regVM_L2_CNTL3_DEFAULT;
- if (adev->gmc.translate_further) {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
- L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
- } else {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
- L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ for_each_inst(i, xcc_mask) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+ /* XXX for emulation, Refer to closed source code.*/
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ /* For AMD APP APUs setup WC memory */
+ if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp);
}
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL3, tmp);
-
- tmp = regVM_L2_CNTL4_DEFAULT;
- if (adev->gmc.xgmi.connected_to_cpu) {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
- }
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL4, tmp);
}
-static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
uint32_t tmp;
+ int i;
- tmp = RREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
- adev->gmc.vmid0_page_table_depth);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
- adev->gmc.vmid0_page_table_block_size);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL, tmp);
+ for_each_inst(i, xcc_mask) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp);
+ }
}
-static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev)
+static void
+gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
- 0XFFFFFFFF);
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
- 0x0000000F);
-
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
- 0);
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
- 0);
-
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+ int i;
+ for_each_inst(i, xcc_mask) {
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0XFFFFFFFF);
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(GC, GET_INST(GC, i),
+ regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+ }
}
-static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned num_level, block_size;
+ struct amdgpu_vmhub *hub;
+ unsigned int num_level, block_size;
uint32_t tmp;
- int i;
+ int i, j;
num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
@@ -264,124 +326,186 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
else
block_size -= 9;
- for (i = 0; i <= 14; i++) {
- tmp = RREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, i);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
- num_level);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
- 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- block_size);
- /* Send no-retry XNACK on fault to suppress VM fault storm.
- * On Aldebaran, XNACK can be enabled in the SQ per-process.
- * Retry faults need to be enabled for that to work.
- */
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !adev->gmc.noretry ||
- adev->asic_type == CHIP_ALDEBARAN);
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL,
- i * hub->ctx_distance, tmp);
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
- i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
- i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
- i * hub->ctx_addr_distance,
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
- i * hub->ctx_addr_distance,
- upper_32_bits(adev->vm_manager.max_pfn - 1));
+ for_each_inst(j, xcc_mask) {
+ hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* Send no-retry XNACK on fault to suppress VM fault storm.
+ * On 9.4.2 and 9.4.3, XNACK can be enabled in
+ * the SQ per-process.
+ * Retry faults need to be enabled for that to work.
+ */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !adev->gmc.noretry ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3));
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
}
}
-static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
-
- for (i = 0 ; i < 18; ++i) {
- WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
- i * hub->eng_addr_distance, 0xffffffff);
- WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
- i * hub->eng_addr_distance, 0x1f);
+ struct amdgpu_vmhub *hub;
+ unsigned int i, j;
+
+ for_each_inst(j, xcc_mask) {
+ hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
+
+ for (i = 0 ; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
}
}
-static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
{
- if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
- /*
- * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
- gfxhub_v1_2_init_gart_aperture_regs(adev);
- gfxhub_v1_2_init_system_aperture_regs(adev);
- gfxhub_v1_2_init_tlb_regs(adev);
+ gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
+ gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
+ gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask);
if (!amdgpu_sriov_vf(adev))
- gfxhub_v1_2_init_cache_regs(adev);
+ gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask);
- gfxhub_v1_2_enable_system_domain(adev);
+ gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask);
if (!amdgpu_sriov_vf(adev))
- gfxhub_v1_2_disable_identity_aperture(adev);
- gfxhub_v1_2_setup_vmid_config(adev);
- gfxhub_v1_2_program_invalidation(adev);
+ gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask);
+ gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask);
+ gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask);
return 0;
}
+static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
+{
+ uint32_t xcc_mask;
+
+ xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
+ return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask);
+}
+
+static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
+ uint32_t xcc_mask)
+{
+ struct amdgpu_vmhub *hub;
+ u32 tmp;
+ u32 i, j;
+
+ for_each_inst(j, xcc_mask) {
+ hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp,
+ MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL,
+ 0);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ }
+}
+
static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ uint32_t xcc_mask;
+
+ xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
+ gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask);
+}
+
+static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value,
+ uint32_t xcc_mask)
+{
u32 tmp;
- u32 i;
-
- /* Disable all tables */
- for (i = 0; i < 16; i++)
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_CNTL,
- i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
- tmp = REG_SET_FIELD(tmp,
- MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL,
- 0);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
- /* Setup L2 cache */
- tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, 0, regVM_L2_CNTL, tmp);
- WREG32_SOC15(GC, 0, regVM_L2_CNTL3, 0);
+ int i;
+
+ for_each_inst(i, xcc_mask) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp,
+ VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+ WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+ }
}
/**
@@ -393,72 +517,100 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
- u32 tmp;
- tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp,
- VM_L2_PROTECTION_FAULT_CNTL,
- TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
- value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- if (!value) {
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- CRASH_ON_NO_RETRY_FAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- CRASH_ON_RETRY_FAULT, 1);
- }
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+ uint32_t xcc_mask;
+
+ xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
+ gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask);
}
-static void gfxhub_v1_2_init(struct amdgpu_device *adev)
+static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub;
+ int i;
+
+ for_each_inst(i, xcc_mask) {
+ hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
- hub->ctx0_ptb_addr_lo32 =
- SOC15_REG_OFFSET(GC, 0,
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
- hub->ctx0_ptb_addr_hi32 =
- SOC15_REG_OFFSET(GC, 0,
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
- hub->vm_inv_eng0_sem =
- SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_SEM);
- hub->vm_inv_eng0_req =
- SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_REQ);
- hub->vm_inv_eng0_ack =
- SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ACK);
- hub->vm_context0_cntl =
- SOC15_REG_OFFSET(GC, 0, regVM_CONTEXT0_CNTL);
- hub->vm_l2_pro_fault_status =
- SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS);
- hub->vm_l2_pro_fault_cntl =
- SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);
-
- hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
- hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
- regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
- hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
- regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i),
+ regVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regVM_CONTEXT1_CNTL -
+ regVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance =
+ regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
+ regVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance =
+ regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+ }
}
+static void gfxhub_v1_2_init(struct amdgpu_device *adev)
+{
+ uint32_t xcc_mask;
+
+ xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
+ gfxhub_v1_2_xcc_init(adev, xcc_mask);
+}
+
+static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
+{
+ u32 max_num_physical_nodes;
+ u32 max_physical_node_id;
+ u32 xgmi_lfb_cntl;
+ u32 max_region;
+ u64 seg_size;
+
+ xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL);
+ seg_size = REG_GET_FIELD(
+ RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE),
+ MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ max_region =
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+
+ max_num_physical_nodes = 8;
+ max_physical_node_id = 7;
+
+ /* PF_MAX_REGION=0 means xgmi is disabled */
+ if (max_region || adev->gmc.xgmi.connected_to_cpu) {
+ adev->gmc.xgmi.num_physical_nodes = max_region + 1;
+
+ if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
+ return -EINVAL;
+
+ adev->gmc.xgmi.physical_node_id =
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
+ PF_LFB_REGION);
+
+ if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
+ return -EINVAL;
+
+ adev->gmc.xgmi.node_segment_size = seg_size;
+ }
+
+ return 0;
+}
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
@@ -467,5 +619,38 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
.gart_disable = gfxhub_v1_2_gart_disable,
.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
.init = gfxhub_v1_2_init,
- .get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+ .get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
+};
+
+static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool value;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ value = false;
+ else
+ value = true;
+
+ gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask);
+
+ if (!amdgpu_sriov_vf(adev))
+ return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask);
+
+ return 0;
+}
+
+static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!amdgpu_sriov_vf(adev))
+ gfxhub_v1_2_xcc_gart_disable(adev, inst_mask);
+
+ return 0;
+}
+
+struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = {
+ .suspend = &gfxhub_v1_2_xcp_suspend,
+ .resume = &gfxhub_v1_2_xcp_resume
};
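
The gfxhub_v1_2 entry points now come in two flavors: the legacy callbacks derive a mask covering every XCC via GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0) and delegate to the _xcc_ helpers, while the XCP suspend/resume hooks receive only the instances owned by one partition. A rough usage sketch (the inst_mask value is illustrative, and these helpers are static to this file):

	/* Whole-device path: fault behaviour programmed on every XCC. */
	uint32_t all_xcc = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);

	gfxhub_v1_2_xcc_set_fault_enable_default(adev, true, all_xcc);

	/* Partition path: e.g. a partition owning XCC0 and XCC1 resumes
	 * with inst_mask == 0x3, touching only those two hubs.
	 */
	gfxhub_v1_2_xcp_resume(adev, 0x3);

Assuming NUM_XCC() counts the populated instances, a device with four XCCs yields GENMASK(3, 0) == 0xf, i.e. instances 0-3.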
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h
index e2d508f5a7ee..997e9f90c990 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h
@@ -26,4 +26,6 @@
extern const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs;
+extern struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 9b3a02527318..a041c6c970e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -31,7 +31,7 @@
#include "soc15_common.h"
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
@@ -120,7 +120,7 @@ static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -282,7 +282,7 @@ static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
@@ -331,8 +331,8 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -360,7 +360,7 @@ static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
@@ -393,6 +393,7 @@ static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
+
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -433,7 +434,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
static void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 4aacbbec31e2..7708d5ded7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -34,7 +34,7 @@
#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP 0x16f8
#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX 0
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
@@ -123,7 +123,7 @@ static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -291,7 +291,7 @@ static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
@@ -340,8 +340,8 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -381,7 +381,7 @@ static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
@@ -462,7 +462,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
static void gfxhub_v2_1_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
@@ -582,6 +582,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
{
int i;
+
adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL);
@@ -616,6 +617,7 @@ static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
{
int i;
+
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2);
WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL);
@@ -651,7 +653,7 @@ static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
int time = 1000;
@@ -679,9 +681,8 @@ static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
}
- if (!time) {
+ if (!time)
DRM_WARN("failed to wait for GRBM(EA) idle\n");
- }
}
const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index 13712640fa46..e1c76c070ba9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -30,7 +30,7 @@
#include "navi10_enum.h"
#include "soc15_common.h"
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
@@ -119,7 +119,7 @@ static u64 gfxhub_v3_0_get_mc_fb_offset(struct amdgpu_device *adev)
static void gfxhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -290,7 +290,7 @@ static void gfxhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
@@ -339,8 +339,8 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -380,7 +380,7 @@ static int gfxhub_v3_0_gart_enable(struct amdgpu_device *adev)
static void gfxhub_v3_0_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
@@ -463,7 +463,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_vmhub_funcs = {
static void gfxhub_v3_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
index 6e0bd628c889..07f369c7a1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -33,7 +33,7 @@
#define regGCVM_L2_CNTL4_DEFAULT 0x000000c1
#define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
@@ -122,7 +122,7 @@ static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev)
static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -295,7 +295,7 @@ static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
@@ -344,8 +344,8 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
@@ -373,7 +373,7 @@ static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev)
static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
@@ -451,7 +451,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = {
static void gfxhub_v3_0_3_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index b213dcf8ca06..fa87a85e1017 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -53,16 +53,9 @@
#include "amdgpu_reset.h"
-#if 0
-static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
-{
- /* TODO add golden setting for hdp */
-};
-#endif
-
static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
@@ -70,13 +63,13 @@ static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src, unsigned type,
+ struct amdgpu_irq_src *src, unsigned int type,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
/* GFX HUB */
/* This works because this interrupt is only
* enabled at init/resume and disabled in
@@ -84,11 +77,11 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* change over the course of suspend/resume.
*/
if (!adev->in_s0ix)
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
/* GFX HUB */
/* This works because this interrupt is only
* enabled at init/resume and disabled in
@@ -96,7 +89,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* change over the course of suspend/resume.
*/
if (!adev->in_s0ix)
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
break;
default:
break;
@@ -109,9 +102,11 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
+ AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+ struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
bool retry_fault = !!(entry->src_data[1] & 0x80);
bool write_fault = !!(entry->src_data[1] & 0x20);
- struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
struct amdgpu_task_info task_info;
uint32_t status = 0;
u64 addr;
@@ -139,7 +134,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault))
return 1;
}
@@ -149,7 +144,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
* be updated to avoid reading an incorrect value due to
* the new fast GRBM interface.
*/
- if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
+ if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
(adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
RREG32(hub->vm_l2_pro_fault_status);
@@ -164,8 +159,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
dev_err(adev->dev,
- "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
- "for process %s pid %d thread %s pid %d)\n",
+ "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
@@ -212,8 +206,7 @@ static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
- return ((vmhub == AMDGPU_MMHUB_0 ||
- vmhub == AMDGPU_MMHUB_1) &&
+ return ((vmhub == AMDGPU_MMHUB0(0)) &&
(!amdgpu_sriov_vf(adev)));
}
@@ -245,11 +238,11 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 tmp;
/* Use register 17 for GART */
- const unsigned eng = 17;
+ const unsigned int eng = 17;
unsigned int i;
unsigned char hub_ip = 0;
- hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+ hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
@@ -284,7 +277,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* Issue a dummy read to wait for the ACK register to be cleared
* to avoid a false ACK due to the new fast GRBM interface.
*/
- if ((vmhub == AMDGPU_GFXHUB_0) &&
+ if ((vmhub == AMDGPU_GFXHUB(0)) &&
(adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng, hub_ip);
@@ -343,11 +336,11 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
*/
- if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
+ if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
down_read_trylock(&adev->reset_domain->sem)) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- const unsigned eng = 17;
+ const unsigned int eng = 17;
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
@@ -361,19 +354,19 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
mutex_lock(&adev->mman.gtt_window_lock);
- if (vmhub == AMDGPU_MMHUB_0) {
- gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
+ if (vmhub == AMDGPU_MMHUB0(0)) {
+ gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB0(0), 0);
mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
- BUG_ON(vmhub != AMDGPU_GFXHUB_0);
+ BUG_ON(vmhub != AMDGPU_GFXHUB(0));
if (!adev->mman.buffer_funcs_enabled ||
!adev->ib_pool_ready ||
amdgpu_in_reset(adev) ||
ring->sched.ready == false) {
- gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
+ gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB(0), 0);
mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
@@ -383,7 +376,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
&job);
@@ -415,12 +408,13 @@ error_alloc:
* @pasid: pasid to be flush
* @flush_type: the flush type
* @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
+ * @inst: which KIQ instance to use for the invalidation
*
* Flush the TLB for the requested pasid.
*/
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
- bool all_hub)
+ bool all_hub, uint32_t inst)
{
int vmid, i;
signed long r;
@@ -428,11 +422,11 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
@@ -440,12 +434,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ETIME;
}
amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
@@ -461,12 +455,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
&queried_pasid);
if (ret && queried_pasid == pasid) {
if (all_hub) {
- for (i = 0; i < adev->num_vmhubs; i++)
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
gmc_v10_0_flush_gpu_tlb(adev, vmid,
i, flush_type);
} else {
gmc_v10_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, flush_type);
+ AMDGPU_GFXHUB(0), flush_type);
}
if (!adev->enable_mes)
break;
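The hunk above also retires the adev->num_vmhubs counter in favour of a vmhubs_mask bitmap walked with for_each_set_bit(), which copes with gaps in the hub numbering for free. Below is a standalone sketch of the same pattern using a plain unsigned long instead of the kernel bitmap helpers; the hub indices and names are illustrative, not taken from the patch.

#include <stdio.h>

#define SKETCH_MAX_VMHUBS	13	/* assumed upper bound */

static void flush_hub(unsigned int hub)
{
	printf("flushing vmhub %u\n", hub);
}

int main(void)
{
	unsigned long vmhubs_mask = 0;
	unsigned int i;

	/* replaces "adev->num_vmhubs = 2": record which hubs actually exist */
	vmhubs_mask |= 1UL << 0;	/* e.g. AMDGPU_GFXHUB(0) */
	vmhubs_mask |= 1UL << 8;	/* e.g. AMDGPU_MMHUB0(0), index assumed */

	/* replaces "for (i = 0; i < adev->num_vmhubs; i++)" */
	for (i = 0; i < SKETCH_MAX_VMHUBS; i++)
		if (vmhubs_mask & (1UL << i))
			flush_hub(i);

	return 0;
}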
@@ -477,12 +471,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
}
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
- unsigned eng = ring->vm_inv_eng;
+ unsigned int eng = ring->vm_inv_eng;
/*
* It may lose gpuvm invalidate acknowldege state across power-gating
@@ -524,8 +518,8 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
- unsigned pasid)
+static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+ unsigned int pasid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
@@ -534,7 +528,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
if (ring->is_mes_queue)
return;
- if (ring->vm_hub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -645,10 +639,10 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
- unsigned size;
+ unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
@@ -751,6 +745,7 @@ static int gmc_v10_0_early_init(void *handle)
adev->gmc.private_aperture_start = 0x1000000000000000ULL;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}
@@ -929,7 +924,8 @@ static int gmc_v10_0_sw_init(void *handle)
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
- adev->num_vmhubs = 2;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+ set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
@@ -971,7 +967,7 @@ static int gmc_v10_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+ dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
return r;
}
@@ -1075,12 +1071,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (!adev->in_s0ix)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
- gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
if (!adev->in_s0ix)
- gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
return 0;
@@ -1254,8 +1250,7 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};
-const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 10,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index d95f9fe8f1c5..e3b76fd28d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -31,6 +31,8 @@
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
+#include "dcn/dcn_3_2_0_offset.h"
+#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
@@ -48,7 +50,7 @@
static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
@@ -56,13 +58,13 @@ static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src, unsigned type,
+ struct amdgpu_irq_src *src, unsigned int type,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
/* GFX HUB */
/* This works because this interrupt is only
* enabled at init/resume and disabled in
@@ -70,11 +72,11 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* change over the course of suspend/resume.
*/
if (!adev->in_s0ix)
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
/* GFX HUB */
/* This works because this interrupt is only
* enabled at init/resume and disabled in
@@ -82,7 +84,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* change over the course of suspend/resume.
*/
if (!adev->in_s0ix)
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
break;
default:
break;
@@ -95,7 +97,9 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
+ uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
+ AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
+ struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
uint32_t status = 0;
u64 addr;
@@ -108,7 +112,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
* be updated to avoid reading an incorrect value due to
* the new fast GRBM interface.
*/
- if (entry->vmid_src == AMDGPU_GFXHUB_0)
+ if (entry->vmid_src == AMDGPU_GFXHUB(0))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -122,8 +126,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
dev_err(adev->dev,
- "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
- "for process %s pid %d thread %s pid %d)\n",
+ "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
@@ -168,7 +171,7 @@ static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
- return ((vmhub == AMDGPU_MMHUB_0) &&
+ return ((vmhub == AMDGPU_MMHUB0(0)) &&
(!amdgpu_sriov_vf(adev)));
}
@@ -196,11 +199,11 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 tmp;
/* Use register 17 for GART */
- const unsigned eng = 17;
+ const unsigned int eng = 17;
unsigned int i;
unsigned char hub_ip = 0;
- hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+ hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
@@ -249,7 +252,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
hub->eng_distance * eng, 0, hub_ip);
/* Issue additional private vm invalidation to MMHUB */
- if ((vmhub != AMDGPU_GFXHUB_0) &&
+ if ((vmhub != AMDGPU_GFXHUB(0)) &&
(hub->vm_l2_bank_select_reserved_cid2) &&
!amdgpu_sriov_vf(adev)) {
inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
@@ -282,7 +285,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
- if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
+ if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
return;
/* flush hdp cache */
@@ -291,10 +294,10 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
*/
- if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
+ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- const unsigned eng = 17;
+ const unsigned int eng = 17;
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
@@ -307,7 +310,6 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
mutex_lock(&adev->mman.gtt_window_lock);
gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
mutex_unlock(&adev->mman.gtt_window_lock);
- return;
}
/**
@@ -317,23 +319,24 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* @pasid: pasid to be flush
* @flush_type: the flush type
* @all_hub: flush all hubs
+ * @inst: which KIQ instance to use for the invalidation
*
* Flush the TLB for the requested pasid.
*/
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
- bool all_hub)
+ bool all_hub, uint32_t inst)
{
int vmid, i;
signed long r;
uint32_t seq;
uint16_t queried_pasid;
bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
@@ -341,12 +344,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ETIME;
}
amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
@@ -362,12 +365,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
&queried_pasid);
if (ret && queried_pasid == pasid) {
if (all_hub) {
- for (i = 0; i < adev->num_vmhubs; i++)
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
gmc_v11_0_flush_gpu_tlb(adev, vmid,
i, flush_type);
} else {
gmc_v11_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, flush_type);
+ AMDGPU_GFXHUB(0), flush_type);
}
}
}
@@ -376,12 +379,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
}
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
- unsigned eng = ring->vm_inv_eng;
+ unsigned int eng = ring->vm_inv_eng;
/*
* It may lose gpuvm invalidate acknowldege state across power-gating
@@ -423,8 +426,8 @@ static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
- unsigned pasid)
+static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+ unsigned int pasid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
@@ -433,7 +436,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
if (ring->is_mes_queue)
return;
- if (ring->vm_hub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
@@ -544,9 +547,26 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
-static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
- return 0;
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
+ unsigned int size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ } else {
+ u32 viewport;
+ u32 pitch;
+
+ viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+ 4);
+ }
+
+ return size;
}
static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
@@ -631,6 +651,7 @@ static int gmc_v11_0_early_init(void *handle)
adev->gmc.private_aperture_start = 0x1000000000000000ULL;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}
@@ -707,9 +728,9 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
- if (amdgpu_gart_size == -1) {
+ if (amdgpu_gart_size == -1)
adev->gmc.gart_size = 512ULL << 20;
- } else
+ else
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
@@ -760,7 +781,8 @@ static int gmc_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
- adev->num_vmhubs = 2;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+ set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
/*
* To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size,
@@ -802,7 +824,7 @@ static int gmc_v11_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+ dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
return r;
}
@@ -867,7 +889,7 @@ static int gmc_v11_0_sw_fini(void *handle)
static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32(hub->vm_contexts_disable, 0);
return;
@@ -902,10 +924,10 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
false : true;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
- gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
+ gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
return 0;
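The new gmc_v11_0_get_vbios_fb_size() above now mirrors the gmc v10 logic instead of returning 0: when VGA mode is off it reserves primary viewport height times surface pitch times 4 bytes per pixel for the firmware framebuffer. A quick worked example with assumed 1080p scanout values (the register names in the hunk are real, the numbers below are not):

#include <stdio.h>

int main(void)
{
	unsigned int height = 1080;	/* PRI_VIEWPORT_HEIGHT, assumed value */
	unsigned int pitch  = 1920;	/* DCSURF_SURFACE_PITCH, assumed value */
	unsigned int size   = height * pitch * 4;	/* 4 bytes per pixel */

	/* prints 8294400 bytes, i.e. 8100 KiB reserved for the vbios fb */
	printf("%u bytes (%u KiB)\n", size, size >> 10);
	return 0;
}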
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b7dad4e67813..5b837a65fad2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -120,7 +120,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
case CHIP_HAINAN:
chip_name = "hainan";
break;
- default: BUG();
+ default:
+ BUG();
}
/* this memory configuration requires special firmware */
@@ -178,9 +179,8 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
}
/* load the MC ucode */
- for (i = 0; i < ucode_size; i++) {
+ for (i = 0; i < ucode_size; i++)
WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
- }
/* put the engine back into the active state */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
@@ -208,6 +208,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
{
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+
base <<= 24;
amdgpu_gmc_vram_location(adev, mc, base);
@@ -228,9 +229,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v6_0_wait_for_idle((void *)adev)) {
+ if (gmc_v6_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
if (adev->mode_info.num_crtc) {
u32 tmp;
@@ -256,9 +256,8 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (gmc_v6_0_wait_for_idle((void *)adev)) {
+ if (gmc_v6_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -269,13 +268,13 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
int r;
tmp = RREG32(mmMC_ARB_RAMCFG);
- if (tmp & (1 << 11)) {
+ if (tmp & (1 << 11))
chansize = 16;
- } else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
+ else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
chansize = 64;
- } else {
+ else
chansize = 32;
- }
+
tmp = RREG32(mmMC_SHARED_CHMAP);
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
@@ -352,7 +351,7 @@ static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
uint32_t reg;
@@ -405,11 +404,11 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
}
/**
- + * gmc_v8_0_set_prt - set PRT VM fault
- + *
- + * @adev: amdgpu_device pointer
- + * @enable: enable/disable VM fault handling for PRT
- +*/
+ * gmc_v6_0_set_prt() - set PRT VM fault
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable VM fault handling for PRT
+ */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
u32 tmp;
@@ -547,7 +546,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
}
@@ -787,15 +786,16 @@ static int gmc_v6_0_late_init(void *handle)
return 0;
}
-static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
- unsigned size;
+ unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
@@ -808,12 +808,13 @@ static int gmc_v6_0_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->num_vmhubs = 1;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
@@ -964,7 +965,7 @@ static bool gmc_v6_0_is_idle(void *handle)
static int gmc_v6_0_wait_for_idle(void *handle)
{
- unsigned i;
+ unsigned int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
@@ -995,10 +996,8 @@ static int gmc_v6_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev);
- if (gmc_v6_0_wait_for_idle(adev)) {
+ if (gmc_v6_0_wait_for_idle(adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
- }
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
@@ -1023,7 +1022,7 @@ static int gmc_v6_0_soft_reset(void *handle)
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
u32 tmp;
@@ -1141,8 +1140,7 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
-const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 6,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 402960b0174e..6a6929ac2748 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -58,16 +58,14 @@ MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
-static const u32 golden_settings_iceland_a11[] =
-{
+static const u32 golden_settings_iceland_a11[] = {
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
-static const u32 iceland_mgcg_cgcg_init[] =
-{
+static const u32 iceland_mgcg_cgcg_init[] = {
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
@@ -151,7 +149,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
case CHIP_KABINI:
case CHIP_MULLINS:
return 0;
- default: BUG();
+ default:
+ return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
@@ -237,6 +236,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
{
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+
base <<= 24;
amdgpu_gmc_vram_location(adev, mc, base);
@@ -266,9 +266,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v7_0_wait_for_idle((void *)adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
+
if (adev->mode_info.num_crtc) {
/* Lockout access through VGA aperture*/
tmp = RREG32(mmVGA_HDP_CONTROL);
@@ -290,9 +290,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (gmc_v7_0_wait_for_idle((void *)adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -324,11 +323,11 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
/* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
chansize = 64;
- } else {
+ else
chansize = 32;
- }
+
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
@@ -419,12 +418,13 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* @pasid: pasid to be flush
* @flush_type: type of flush
* @all_hub: flush all hubs
+ * @inst: which KIQ instance to use for the invalidation
*
* Flush the TLB for the requested pasid.
*/
static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
- bool all_hub)
+ bool all_hub, uint32_t inst)
{
int vmid;
unsigned int tmp;
@@ -471,7 +471,7 @@ static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
uint32_t reg;
@@ -487,8 +487,8 @@ static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
- unsigned pasid)
+static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+ unsigned int pasid)
{
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
@@ -699,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
}
@@ -760,7 +760,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
* Print human readable fault information (CIK).
*/
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
- u32 addr, u32 mc_client, unsigned pasid)
+ u32 addr, u32 mc_client, unsigned int pasid)
{
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -941,6 +941,7 @@ static int gmc_v7_0_early_init(void *handle)
adev->gmc.shared_aperture_end + 1;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}
@@ -955,15 +956,16 @@ static int gmc_v7_0_late_init(void *handle)
return 0;
}
-static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
- unsigned size;
+ unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
@@ -977,12 +979,13 @@ static int gmc_v7_0_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->num_vmhubs = 1;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
+
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
@@ -1151,7 +1154,7 @@ static bool gmc_v7_0_is_idle(void *handle)
static int gmc_v7_0_wait_for_idle(void *handle)
{
- unsigned i;
+ unsigned int i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1189,10 +1192,8 @@ static int gmc_v7_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev);
- if (gmc_v7_0_wait_for_idle((void *)adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
- }
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
@@ -1218,7 +1219,7 @@ static int gmc_v7_0_soft_reset(void *handle)
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
u32 tmp;
@@ -1382,8 +1383,7 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
-const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7,
.minor = 0,
@@ -1391,8 +1391,7 @@ const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
.funcs = &gmc_v7_0_ip_funcs,
};
-const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7,
.minor = 4,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 504c1b34dab7..5af235202513 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -64,8 +64,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
-static const u32 golden_settings_tonga_a11[] =
-{
+static const u32 golden_settings_tonga_a11[] = {
mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
@@ -75,34 +74,29 @@ static const u32 golden_settings_tonga_a11[] =
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
-static const u32 tonga_mgcg_cgcg_init[] =
-{
+static const u32 tonga_mgcg_cgcg_init[] = {
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 golden_settings_fiji_a10[] =
-{
+static const u32 golden_settings_fiji_a10[] = {
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
-static const u32 fiji_mgcg_cgcg_init[] =
-{
+static const u32 fiji_mgcg_cgcg_init[] = {
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 golden_settings_polaris11_a11[] =
-{
+static const u32 golden_settings_polaris11_a11[] = {
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
-static const u32 golden_settings_polaris10_a11[] =
-{
+static const u32 golden_settings_polaris10_a11[] = {
mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -110,19 +104,16 @@ static const u32 golden_settings_polaris10_a11[] =
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
-static const u32 cz_mgcg_cgcg_init[] =
-{
+static const u32 cz_mgcg_cgcg_init[] = {
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 stoney_mgcg_cgcg_init[] =
-{
+static const u32 stoney_mgcg_cgcg_init[] = {
mmATC_MISC_CG, 0xffffffff, 0x000c0200,
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
-static const u32 golden_settings_stoney_common[] =
-{
+static const u32 golden_settings_stoney_common[] = {
mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};
@@ -260,7 +251,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_STONEY:
case CHIP_VEGAM:
return 0;
- default: BUG();
+ default:
+ return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
@@ -448,9 +440,9 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v8_0_wait_for_idle((void *)adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
+
if (adev->mode_info.num_crtc) {
/* Lockout access through VGA aperture*/
tmp = RREG32(mmVGA_HDP_CONTROL);
@@ -483,9 +475,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (gmc_v8_0_wait_for_idle((void *)adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
- }
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -517,11 +508,11 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
/* Get VRAM informations */
tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
chansize = 64;
- } else {
+ else
chansize = 32;
- }
+
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
@@ -617,12 +608,13 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* @pasid: pasid to be flush
* @flush_type: type of flush
* @all_hub: flush all hubs
+ * @inst: which KIQ instance to use for the invalidation
*
* Flush the TLB for the requested pasid.
*/
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
- bool all_hub)
+ bool all_hub, uint32_t inst)
{
int vmid;
unsigned int tmp;
@@ -670,7 +662,7 @@ static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
uint32_t reg;
@@ -686,8 +678,8 @@ static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
- unsigned pasid)
+static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+ unsigned int pasid)
{
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
@@ -758,11 +750,11 @@ static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
}
/**
- * gmc_v8_0_set_prt - set PRT VM fault
+ * gmc_v8_0_set_prt() - set PRT VM fault
*
* @adev: amdgpu_device pointer
* @enable: enable/disable VM fault handling for PRT
-*/
+ */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
u32 tmp;
@@ -939,7 +931,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
}
@@ -1000,7 +992,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
* Print human readable fault information (VI).
*/
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
- u32 addr, u32 mc_client, unsigned pasid)
+ u32 addr, u32 mc_client, unsigned int pasid)
{
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1055,6 +1047,7 @@ static int gmc_v8_0_early_init(void *handle)
adev->gmc.shared_aperture_end + 1;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}
@@ -1069,15 +1062,16 @@ static int gmc_v8_0_late_init(void *handle)
return 0;
}
-static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
- unsigned size;
+ unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
+
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
@@ -1093,7 +1087,7 @@ static int gmc_v8_0_sw_init(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->num_vmhubs = 1;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
@@ -1281,7 +1275,7 @@ static bool gmc_v8_0_is_idle(void *handle)
static int gmc_v8_0_wait_for_idle(void *handle)
{
- unsigned i;
+ unsigned int i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1317,13 +1311,15 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
+
if (srbm_soft_reset) {
adev->gmc.srbm_soft_reset = srbm_soft_reset;
return true;
- } else {
- adev->gmc.srbm_soft_reset = 0;
- return false;
}
+
+ adev->gmc.srbm_soft_reset = 0;
+
+ return false;
}
static int gmc_v8_0_pre_soft_reset(void *handle)
@@ -1334,9 +1330,8 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
return 0;
gmc_v8_0_mc_stop(adev);
- if (gmc_v8_0_wait_for_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle(adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
- }
return 0;
}
@@ -1385,7 +1380,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
u32 tmp;
@@ -1746,8 +1741,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}
-const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v8_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 0,
@@ -1755,8 +1749,7 @@ const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
.funcs = &gmc_v8_0_ip_funcs,
};
-const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v8_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 1,
@@ -1764,8 +1757,7 @@ const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
.funcs = &gmc_v8_0_ip_funcs,
};
-const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v8_5_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 8,
.minor = 5,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe21cefd772..f9a5a2c0573e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -79,8 +79,9 @@
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
+#define MAX_MEM_RANGES 8
-static const char *gfxhub_client_ids[] = {
+static const char * const gfxhub_client_ids[] = {
"CB",
"DB",
"IA",
@@ -331,14 +332,12 @@ static const char *mmhub_client_ids_aldebaran[][2] = {
[384+0][1] = "OSS",
};
-static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
-{
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
-static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
-{
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
@@ -415,13 +414,14 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
u32 bits, i, tmp, reg;
/* Devices newer then VEGA10/12 shall have these programming
- sequences performed by PSP BL */
+ * sequences performed by PSP BL
+ */
if (adev->asic_type >= CHIP_VEGA20)
return 0;
@@ -465,7 +465,7 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
struct amdgpu_vmhub *hub;
@@ -481,7 +481,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- for (j = 0; j < adev->num_vmhubs; j++) {
+ for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
@@ -491,25 +491,25 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* fini/suspend, so the overall state doesn't
* change over the course of suspend/resume.
*/
- if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
continue;
- if (j == AMDGPU_GFXHUB_0)
- tmp = RREG32_SOC15_IP(GC, reg);
- else
+ if (j >= AMDGPU_MMHUB0(0))
tmp = RREG32_SOC15_IP(MMHUB, reg);
+ else
+ tmp = RREG32_SOC15_IP(GC, reg);
tmp &= ~bits;
- if (j == AMDGPU_GFXHUB_0)
- WREG32_SOC15_IP(GC, reg, tmp);
- else
+ if (j >= AMDGPU_MMHUB0(0))
WREG32_SOC15_IP(MMHUB, reg, tmp);
+ else
+ WREG32_SOC15_IP(GC, reg, tmp);
}
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
- for (j = 0; j < adev->num_vmhubs; j++) {
+ for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
@@ -519,20 +519,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* fini/suspend, so the overall state doesn't
* change over the course of suspend/resume.
*/
- if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
continue;
- if (j == AMDGPU_GFXHUB_0)
- tmp = RREG32_SOC15_IP(GC, reg);
- else
+ if (j >= AMDGPU_MMHUB0(0))
tmp = RREG32_SOC15_IP(MMHUB, reg);
+ else
+ tmp = RREG32_SOC15_IP(GC, reg);
tmp |= bits;
- if (j == AMDGPU_GFXHUB_0)
- WREG32_SOC15_IP(GC, reg, tmp);
- else
+ if (j >= AMDGPU_MMHUB0(0))
WREG32_SOC15_IP(MMHUB, reg, tmp);
+ else
+ WREG32_SOC15_IP(GC, reg, tmp);
}
}
break;
@@ -556,11 +556,31 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
const char *hub_name;
u64 addr;
uint32_t cam_index = 0;
- int ret;
+ int ret, xcc_id = 0;
+ uint32_t node_id;
+
+ node_id = entry->node_id;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
+ hub_name = "mmhub0";
+ hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)];
+ } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
+ hub_name = "mmhub1";
+ hub = &adev->vmhub[AMDGPU_MMHUB1(0)];
+ } else {
+ hub_name = "gfxhub0";
+ if (adev->gfx.funcs->ih_node_to_logical_xcc) {
+ xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
+ node_id);
+ if (xcc_id < 0)
+ xcc_id = 0;
+ }
+ hub = &adev->vmhub[xcc_id];
+ }
+
if (retry_fault) {
if (adev->irq.retry_cam_enabled) {
/* Delegate it to a different ring if the hardware hasn't
@@ -573,7 +593,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
cam_index = entry->src_data[2] & 0x3ff;
- ret = amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault);
+ ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
+ addr, write_fault);
WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
if (ret)
return 1;
@@ -595,7 +616,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
/* Try to handle the recoverable page faults by filling page
* tables
*/
- if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
+ addr, write_fault))
return 1;
}
}
@@ -603,23 +625,12 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (!printk_ratelimit())
return 0;
- if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
- hub_name = "mmhub0";
- hub = &adev->vmhub[AMDGPU_MMHUB_0];
- } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
- hub_name = "mmhub1";
- hub = &adev->vmhub[AMDGPU_MMHUB_1];
- } else {
- hub_name = "gfxhub0";
- hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- }
memset(&task_info, 0, sizeof(struct amdgpu_task_info));
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
dev_err(adev->dev,
- "[%s] %s page fault (src_id:%u ring:%u vmid:%u "
- "pasid:%u, for process %s pid %d thread %s pid %d)\n",
+ "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
hub_name, retry_fault ? "retry" : "no-retry",
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
@@ -628,6 +639,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
+ node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
+ node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
+
if (amdgpu_sriov_vf(adev))
return 0;
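The extra dev_err() above decodes the node_id cookie for GC 9.4.3 faults: every four node ids belong to one AID, with ids 1 and 2 inside the group naming XCD0 and XCD1 and id 3 reserved. A tiny standalone decoder using the same expressions, fed with arbitrary demo values:

#include <stdio.h>

static void decode_node(unsigned int node_id)
{
	const char *die = node_id % 4 == 3 ? "RSV" : "AID";
	const char *xcd = node_id % 4 == 1 ? ".XCD0" :
			  node_id % 4 == 2 ? ".XCD1" : "";

	printf("node_id %u -> %s%u%s\n", node_id, die, node_id / 4, xcd);
}

int main(void)
{
	decode_node(1);	/* AID0.XCD0 */
	decode_node(6);	/* AID1.XCD1 */
	decode_node(7);	/* RSV1 */
	return 0;
}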
@@ -636,7 +652,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* be updated to avoid reading an incorrect value due to
* the new fast GRBM interface.
*/
- if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
+ if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
(adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32(hub->vm_l2_pro_fault_status);
@@ -645,11 +661,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
-
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
+ if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
gfxhub_client_ids[cid],
@@ -759,8 +774,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
return false;
- return ((vmhub == AMDGPU_MMHUB_0 ||
- vmhub == AMDGPU_MMHUB_1) &&
+ return ((vmhub == AMDGPU_MMHUB0(0) ||
+ vmhub == AMDGPU_MMHUB1(0)) &&
(!amdgpu_sriov_vf(adev)) &&
(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
(adev->apu_flags & AMD_APU_IS_PICASSO))));
@@ -799,11 +814,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
- const unsigned eng = 17;
+ const unsigned int eng = 17;
u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub;
- BUG_ON(vmhub >= adev->num_vmhubs);
+ BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
hub = &adev->vmhub[vmhub];
if (adev->gmc.xgmi.num_physical_nodes &&
@@ -816,6 +831,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*/
inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ } else if (flush_type == 2 &&
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
+ adev->rev_id == 0) {
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, 0);
+ inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
} else {
inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
inv_req2 = 0;
@@ -824,7 +844,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
*/
- if (adev->gfx.kiq.ring.sched.ready &&
+ if (adev->gfx.kiq[0].ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
down_read_trylock(&adev->reset_domain->sem)) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
@@ -849,11 +869,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
if (use_semaphore) {
for (j = 0; j < adev->usec_timeout; j++) {
/* a read return value of 1 means semaphore acquire */
- if (vmhub == AMDGPU_GFXHUB_0)
- tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
- else
+ if (vmhub >= AMDGPU_MMHUB0(0))
tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
-
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
if (tmp & 0x1)
break;
udelay(1);
@@ -864,27 +883,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
do {
- if (vmhub == AMDGPU_GFXHUB_0)
- WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
- else
+ if (vmhub >= AMDGPU_MMHUB0(0))
WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+ else
+ WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
/*
* Issue a dummy read to wait for the ACK register to
* be cleared to avoid a false ACK due to the new fast
* GRBM interface.
*/
- if ((vmhub == AMDGPU_GFXHUB_0) &&
+ if ((vmhub == AMDGPU_GFXHUB(0)) &&
(adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng);
for (j = 0; j < adev->usec_timeout; j++) {
- if (vmhub == AMDGPU_GFXHUB_0)
- tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
- else
+ if (vmhub >= AMDGPU_MMHUB0(0))
tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
-
+ else
+ tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
if (tmp & (1 << vmid))
break;
udelay(1);
@@ -900,10 +918,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* add semaphore release after invalidation,
* write with 0 means semaphore release
*/
- if (vmhub == AMDGPU_GFXHUB_0)
- WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
- else
+ if (vmhub >= AMDGPU_MMHUB0(0))
WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
+ else
+ WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
}
spin_unlock(&adev->gmc.invalidate_lock);
@@ -921,12 +939,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* @pasid: pasid to be flushed
* @flush_type: the flush type
* @all_hub: flush all hubs
+ * @inst: is used to select which instance of KIQ to use for the invalidation
*
* Flush the TLB for the requested pasid.
*/
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
- bool all_hub)
+ bool all_hub, uint32_t inst)
{
int vmid, i;
signed long r;
@@ -934,8 +953,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
if (amdgpu_in_reset(adev))
return -EIO;
@@ -955,24 +974,31 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (vega20_xgmi_wa)
ndw += kiq->pmf->invalidate_tlbs_size;
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, ndw);
if (vega20_xgmi_wa)
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, 2, all_hub);
+
+ if (flush_type == 2 &&
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
+ adev->rev_id == 0)
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, 0, all_hub);
+
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
up_read(&adev->reset_domain->sem);
return -ETIME;
}
amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
@@ -989,12 +1015,12 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
&queried_pasid);
if (ret && queried_pasid == pasid) {
if (all_hub) {
- for (i = 0; i < adev->num_vmhubs; i++)
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
gmc_v9_0_flush_gpu_tlb(adev, vmid,
i, flush_type);
} else {
gmc_v9_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, flush_type);
+ AMDGPU_GFXHUB(0), flush_type);
}
break;
}
@@ -1005,13 +1031,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
}
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
- unsigned vmid, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
- unsigned eng = ring->vm_inv_eng;
+ unsigned int eng = ring->vm_inv_eng;
/*
* It may lose gpuvm invalidate acknowledge state across power-gating
@@ -1053,17 +1079,17 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
- unsigned pasid)
+static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
+ unsigned int pasid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
/* Do nothing because there's no lut register for mmhub1. */
- if (ring->vm_hub == AMDGPU_MMHUB_1)
+ if (ring->vm_hub == AMDGPU_MMHUB1(0))
return;
- if (ring->vm_hub == AMDGPU_GFXHUB_0)
+ if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
@@ -1159,13 +1185,14 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- unsigned int mtype;
+ struct amdgpu_vm *vm = mapping->bo_va->base.vm;
+ unsigned int mtype_local, mtype;
bool snoop = false;
+ bool is_local;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
- case IP_VERSION(9, 4, 3):
if (is_vram) {
if (bo_adev == adev) {
if (uncached)
@@ -1200,6 +1227,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
snoop = true;
}
break;
+ case IP_VERSION(9, 4, 3):
+ /* Only local VRAM BOs or system memory on non-NUMA APUs
+ * can be assumed to be local in their entirety. Choose
+ * MTYPE_NC as safe fallback for all system memory BOs on
+ * NUMA systems. Their MTYPE can be overridden per-page in
+ * gmc_v9_0_override_vm_pte_flags.
+ */
+ mtype_local = MTYPE_RW;
+ if (amdgpu_mtype_local == 1) {
+ DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
+ mtype_local = MTYPE_NC;
+ } else if (amdgpu_mtype_local == 2) {
+ DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
+ mtype_local = MTYPE_CC;
+ } else {
+ DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
+ }
+ is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
+ num_possible_nodes() <= 1) ||
+ (is_vram && adev == bo_adev &&
+ KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
+ snoop = true;
+ if (uncached) {
+ mtype = MTYPE_UC;
+ } else if (adev->flags & AMD_IS_APU) {
+ mtype = is_local ? mtype_local : MTYPE_NC;
+ } else {
+ /* dGPU */
+ if (is_local)
+ mtype = mtype_local;
+ else if (is_vram)
+ mtype = MTYPE_NC;
+ else
+ mtype = MTYPE_UC;
+ }
+
+ break;
default:
if (uncached || coherent)
mtype = MTYPE_UC;
@@ -1241,10 +1305,76 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
mapping, flags);
}
-static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t addr, uint64_t *flags)
+{
+ int local_node, nid;
+
+ /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
+ * memory can use more efficient MTYPEs.
+ */
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+ return;
+
+ /* Only direct-mapped memory allows us to determine the NUMA node from
+ * the DMA address.
+ */
+ if (!adev->ram_is_direct_mapped) {
+ dev_dbg(adev->dev, "RAM is not direct mapped\n");
+ return;
+ }
+
+ /* Only override mappings with MTYPE_NC, which is the safe default for
+ * cacheable memory.
+ */
+ if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
+ AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
+ dev_dbg(adev->dev, "MTYPE is not NC\n");
+ return;
+ }
+
+ /* FIXME: Only supported on native mode for now. For carve-out, the
+ * NUMA affinity of the GPU/VM needs to come from the PCI info because
+ * memory partitions are not associated with different NUMA nodes.
+ */
+ if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
+ local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
+ } else {
+ dev_dbg(adev->dev, "Only native mode APU is supported.\n");
+ return;
+ }
+
+ /* Only handle real RAM. Mappings of PCIe resources don't have struct
+ * page or NUMA nodes.
+ */
+ if (!page_is_ram(addr >> PAGE_SHIFT)) {
+ dev_dbg(adev->dev, "Page is not RAM.\n");
+ return;
+ }
+ nid = pfn_to_nid(addr >> PAGE_SHIFT);
+ dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
+ vm->mem_id, local_node, nid);
+ if (nid == local_node) {
+ uint64_t old_flags = *flags;
+ unsigned int mtype_local = MTYPE_RW;
+
+ if (amdgpu_mtype_local == 1)
+ mtype_local = MTYPE_NC;
+ else if (amdgpu_mtype_local == 2)
+ mtype_local = MTYPE_CC;
+
+ *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
+ AMDGPU_PTE_MTYPE_VG10(mtype_local);
+ dev_dbg(adev->dev, "flags updated from %llx to %llx\n",
+ old_flags, *flags);
+ }
+}
+
+static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
- unsigned size;
+ unsigned int size;
/* TODO move to DC so GMC doesn't need to hard-code DCN registers */
@@ -1283,6 +1413,27 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
+static enum amdgpu_memory_partition
+gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+
+ return mode;
+}
+
+static enum amdgpu_memory_partition
+gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return AMDGPU_NPS1_PARTITION_MODE;
+
+ return gmc_v9_0_get_memory_partition(adev, NULL);
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -1291,7 +1442,9 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
+ .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
+ .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -1372,6 +1525,9 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
adev->mmhub.ras = &mmhub_v1_7_ras;
break;
+ case IP_VERSION(1, 8, 0):
+ adev->mmhub.ras = &mmhub_v1_8_ras;
+ break;
default:
/* mmhub ras is not available */
break;
@@ -1419,9 +1575,13 @@ static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
+ /*
+ * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
+ * in their IP discovery tables
+ */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
adev->gmc.xgmi.supported = true;
if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
@@ -1430,6 +1590,20 @@ static int gmc_v9_0_early_init(void *handle)
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
}
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ enum amdgpu_pkg_type pkg_type =
+ adev->smuio.funcs->get_pkg_type(adev);
+ /* On GFXIP 9.4.3 APUs, there is no physical VRAM domain present
+ * and the APU can be used in two possible modes:
+ * - carveout mode
+ * - native APU mode
+ * "is_app_apu" can be used to identify the APU in the native
+ * mode.
+ */
+ adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
+ !pci_resource_len(adev->pdev, 0));
+ }
+
gmc_v9_0_set_gmc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
gmc_v9_0_set_umc_funcs(adev);
@@ -1446,6 +1620,7 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.private_aperture_start = 0x1000000000000000ULL;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
+ adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}
@@ -1525,8 +1700,13 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
int r;
/* size in MB on si */
- adev->gmc.mc_vram_size =
- adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ if (!adev->gmc.is_app_apu) {
+ adev->gmc.mc_vram_size =
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ } else {
+ DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
+ adev->gmc.mc_vram_size = 0;
+ }
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU) &&
@@ -1551,7 +1731,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
*/
/* check whether both host-gpu and gpu-gpu xgmi links exist */
- if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
+ if ((!amdgpu_sriov_vf(adev) &&
+ (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
(adev->gmc.xgmi.supported &&
adev->gmc.xgmi.connected_to_cpu)) {
adev->gmc.aper_base =
@@ -1618,12 +1799,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
AMDGPU_PTE_EXECUTABLE;
- r = amdgpu_gart_table_vram_alloc(adev);
- if (r)
- return r;
+ if (!adev->gmc.real_vram_size) {
+ dev_info(adev->dev, "Put GART in system memory for APU\n");
+ r = amdgpu_gart_table_ram_alloc(adev);
+ if (r)
+ dev_err(adev->dev, "Failed to allocate GART in system memory\n");
+ } else {
+ r = amdgpu_gart_table_vram_alloc(adev);
+ if (r)
+ return r;
- if (adev->gmc.xgmi.connected_to_cpu) {
- r = amdgpu_gmc_pdb0_alloc(adev);
+ if (adev->gmc.xgmi.connected_to_cpu)
+ r = amdgpu_gmc_pdb0_alloc(adev);
}
return r;
@@ -1644,10 +1831,191 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
+static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
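For reference, a hedged sketch of the consistency rule enforced above: the NPS mode reported by NBIO must have its bit set in the supported-modes mask (bit mode - 1), and the number of software memory ranges must agree with the mode. The expected counts, restated as a hypothetical helper:

/* Illustrative only; mirrors the switch in gmc_v9_0_validate_partition_info(). */
static bool nps_mode_matches_ranges(enum amdgpu_memory_partition mode,
				    u32 num_ranges)
{
	switch (mode) {
	case UNKNOWN_MEMORY_PARTITION_MODE:
	case AMDGPU_NPS1_PARTITION_MODE:
		return num_ranges == 1;
	case AMDGPU_NPS2_PARTITION_MODE:
		return num_ranges == 2;
	case AMDGPU_NPS4_PARTITION_MODE:
		return num_ranges == 3 || num_ranges == 4;	/* 3 on APUs, 4 on dGPUs */
	default:
		return false;
	}
}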
+
+static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ int num_ranges = 0, ret, mem_groups;
+ struct amdgpu_numa_info numa_info;
+ int node_ids[MAX_MEM_RANGES];
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+ mem_groups = hweight32(adev->aid_mask);
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (gmc_v9_0_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+
+ /* If there is only one partition, don't use the entire size */
+ if (adev->gmc.num_mem_partitions == 1) {
+ mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1);
+ do_div(mem_ranges[0].size, mem_groups);
+ }
+}
+
+static void
+gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i;
+
+ mode = gmc_v9_0_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[adev->gmc.num_mem_partitions - 1].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn
+ << AMDGPU_GPU_PAGE_SHIFT);
+}
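As a worked example with illustrative numbers (not taken from the patch): a dGPU with 64 GiB of VRAM in NPS4 mode gives size = (64 GiB >> AMDGPU_GPU_PAGE_SHIFT) / 4 = 4M GPU pages (16 GiB) per range; fpfn/lpfn advance in 4M-page steps, and the final range's lpfn and size are then clamped to the true end of VRAM so any division remainder lands in the last partition.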
+
+static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kzalloc(
+ MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ /* TODO : Get the range from PSP/Discovery for dGPU */
+ if (adev->gmc.is_app_apu)
+ gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = gmc_v9_0_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_WARN(adev->dev,
+ "Mem ranges not matching with hardware config");
+ }
+
+ return 0;
+}
+
+static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
+{
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
+}
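If the 128 * 64 figure above follows the same convention as the HBM path in gmc_v9_0_sw_init() (channel width times channel count), it corresponds to 64 HBM channels of 128 bits each, i.e. an 8192-bit effective interface; this is an interpretation, not something the patch states.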
+
static int gmc_v9_0_sw_init(void *handle)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ unsigned long inst_mask = adev->aid_mask;
adev->gfxhub.funcs->init(adev);
@@ -1655,38 +2023,51 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- r = amdgpu_atomfirmware_get_vram_info(adev,
- &vram_width, &vram_type, &vram_vendor);
- if (amdgpu_sriov_vf(adev))
- /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
- * and DF related registers is not readable, seems hardcord is the
- * only way to set the correct vram_width
- */
- adev->gmc.vram_width = 2048;
- else if (amdgpu_emu_mode != 1)
- adev->gmc.vram_width = vram_width;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ gmc_v9_4_3_init_vram_info(adev);
+ } else if (!adev->bios) {
+ if (adev->flags & AMD_IS_APU) {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
+ adev->gmc.vram_width = 64 * 64;
+ } else {
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ adev->gmc.vram_width = 128 * 64;
+ }
+ } else {
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (amdgpu_sriov_vf(adev))
+ /* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
+ * and DF related registers are not readable; hardcoding seems to be the
+ * only way to set the correct vram_width
+ */
+ adev->gmc.vram_width = 2048;
+ else if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = vram_width;
- if (!adev->gmc.vram_width) {
- int chansize, numchan;
+ if (!adev->gmc.vram_width) {
+ int chansize, numchan;
- /* hbm memory channel size */
- if (adev->flags & AMD_IS_APU)
- chansize = 64;
- else
- chansize = 128;
- if (adev->df.funcs &&
- adev->df.funcs->get_hbm_channel_number) {
- numchan = adev->df.funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
+ /* hbm memory channel size */
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
+ if (adev->df.funcs &&
+ adev->df.funcs->get_hbm_channel_number) {
+ numchan = adev->df.funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
}
- }
- adev->gmc.vram_type = vram_type;
- adev->gmc.vram_vendor = vram_vendor;
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
+ }
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
- adev->num_vmhubs = 2;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+ set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
@@ -1702,9 +2083,8 @@ static int gmc_v9_0_sw_init(void *handle)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 2):
- case IP_VERSION(9, 4, 3):
- adev->num_vmhubs = 2;
-
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+ set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
/*
* To fulfill 4-level page support,
@@ -1720,12 +2100,24 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
case IP_VERSION(9, 4, 1):
- adev->num_vmhubs = 3;
+ set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
+ set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
+ set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
/* Keep the vm size same with Vega20 */
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
+ case IP_VERSION(9, 4, 3):
+ bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
+ NUM_XCC(adev->gfx.xcc_mask));
+
+ inst_mask <<= AMDGPU_MMHUB0(0);
+ bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
+
+ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
+ adev->gmc.translate_further = adev->vm_manager.num_level > 1;
+ break;
default:
break;
}
@@ -1764,10 +2156,10 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- dma_addr_bits = adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ? 48:44;
+ dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
if (r) {
- printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+ dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
@@ -1778,6 +2170,12 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_gmc_get_vbios_allocations(adev);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
+ r = gmc_v9_0_init_mem_ranges(adev);
+ if (r)
+ return r;
+ }
+
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
@@ -1810,6 +2208,9 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ amdgpu_gmc_sysfs_init(adev);
+
return 0;
}
@@ -1817,10 +2218,20 @@ static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
+ amdgpu_gmc_sysfs_fini(adev);
+ adev->gmc.num_mem_partitions = 0;
+ kfree(adev->gmc.mem_partitions);
+
amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- amdgpu_gart_table_vram_free(adev);
+ if (!adev->gmc.real_vram_size) {
+ dev_info(adev->dev, "Put GART in system memory for APU free\n");
+ amdgpu_gart_table_ram_free(adev);
+ } else {
+ amdgpu_gart_table_vram_free(adev);
+ }
amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
@@ -1902,7 +2313,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
return r;
DRM_INFO("PCIE GART of %uM enabled.\n",
- (unsigned)(adev->gmc.gart_size >> 20));
+ (unsigned int)(adev->gmc.gart_size >> 20));
if (adev->gmc.pdb0_bo)
DRM_INFO("PDB0 located at 0x%016llX\n",
(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
@@ -1946,8 +2357,8 @@ static int gmc_v9_0_hw_init(void *handle)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
}
- for (i = 0; i < adev->num_vmhubs; ++i) {
- if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0))
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
+ if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
continue;
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
}
@@ -2088,8 +2499,7 @@ const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
-const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
-{
+const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 9,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index b02e1cef78a7..ec0c8f8b465a 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -494,7 +494,8 @@ static int ih_v6_0_self_irq(struct amdgpu_device *adev,
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
- default: break;
+ default:
+ break;
}
return 0;
}
@@ -535,7 +536,7 @@ static int ih_v6_0_sw_init(void *handle)
* use bus address for ih ring by psp bl */
use_bus_addr =
(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
@@ -548,7 +549,7 @@ static int ih_v6_0_sw_init(void *handle)
/* initialize ih control register offset */
ih_v6_0_init_register_offset(adev);
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
if (r)
return r;
@@ -759,8 +760,7 @@ static void ih_v6_0_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &ih_v6_0_funcs;
}
-const struct amdgpu_ip_block_version ih_v6_0_ip_block =
-{
+const struct amdgpu_ip_block_version ih_v6_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 6,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
new file mode 100644
index 000000000000..8fb05eae340a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+
+#include "oss/osssys_6_1_0_offset.h"
+#include "oss/osssys_6_1_0_sh_mask.h"
+
+#include "soc15_common.h"
+#include "ih_v6_1.h"
+
+#define MAX_REARM_RETRY 10
+
+static void ih_v6_1_set_interrupt_funcs(struct amdgpu_device *adev);
+
+/**
+ * ih_v6_1_init_register_offset - Initialize register offset for ih rings
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize register offsets for the ih rings (IH_V6_1).
+ */
+static void ih_v6_1_init_register_offset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ /* ih ring 2 is removed
+ * ih ring and ih ring 1 are available */
+ if (adev->irq.ih.ring_size) {
+ ih_regs = &adev->irq.ih.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
+ ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
+ ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
+ }
+
+ if (adev->irq.ih1.ring_size) {
+ ih_regs = &adev->irq.ih1.ih_regs;
+ ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
+ ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
+ ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
+ ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
+ ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
+ ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
+ ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
+ }
+}
+
+/**
+ * force_update_wptr_for_self_int - Force update the wptr for self interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @threshold: threshold to trigger the wptr reporting
+ * @timeout: timeout to trigger the wptr reporting
+ * @enabled: Enable/disable timeout flush mechanism
+ *
+ * threshold input range: 0 ~ 15, default 0,
+ * real_threshold = 2^threshold
+ * timeout input range: 0 ~ 20, default 8,
+ * real_timeout = (2^timeout) * 1024 / (socclk_freq)
+ *
+ * Force update wptr for self interrupt ( >= SIENNA_CICHLID).
+ */
+static void
+force_update_wptr_for_self_int(struct amdgpu_device *adev,
+ u32 threshold, u32 timeout, bool enabled)
+{
+ u32 ih_cntl, ih_rb_cntl;
+
+ ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
+
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
+ ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
+ SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_USED_INT_THRESHOLD, threshold);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
+ return;
+ } else {
+ WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+
+ WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
+}
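A worked example, assuming the defaults used at the call sites below (threshold 0, timeout 8) and a hypothetical 1 GHz SOCCLK: real_threshold = 2^0 = 1 ring entry, and real_timeout = (2^8 * 1024) / 1e9 s, roughly 262 us before a forced WPTR report.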
+
+/**
+ * ih_v6_1_toggle_ring_interrupts - toggle the interrupt ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ * @enable: true - enable the interrupts, false - disable the interrupts
+ *
+ * Toggle the interrupt ring buffer (IH_V6_1)
+ */
+static int ih_v6_1_toggle_ring_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ bool enable)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ /* enable_intr field is only valid in ring0 */
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (enable) {
+ ih->enabled = true;
+ } else {
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_rptr, 0);
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ ih->enabled = false;
+ ih->rptr = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ih_v6_1_toggle_interrupts - Toggle all the available interrupt ring buffers
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable or disable interrupt ring buffers
+ *
+ * Toggle all the available interrupt ring buffers (IH_V6_1).
+ */
+static int ih_v6_1_toggle_interrupts(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
+ int i;
+ int r;
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ r = ih_v6_1_toggle_ring_interrupts(adev, ih[i], enable);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t ih_v6_1_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
+{
+ int rb_bufsz = order_base_2(ih->ring_size / 4);
+
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ MC_SPACE, ih->use_bus_addr ? 2 : 4);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_CLEAR, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
+ * value is written to memory
+ */
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_WRITEBACK_ENABLE, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
+
+ return ih_rb_cntl;
+}
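For instance (illustrative, based on the 256 KiB ring allocated in ih_v6_1_sw_init() below): ring_size / 4 = 65536 dwords, so order_base_2() gives rb_bufsz = 16 for the RB_SIZE field; MC_SPACE is set to 2 when the ring uses a bus address and 4 otherwise.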
+
+static uint32_t ih_v6_1_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+/**
+ * ih_v6_1_enable_ring - enable an ih ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Enable an ih ring buffer (IH_V6_1)
+ */
+static int ih_v6_1_enable_ring(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+ uint32_t tmp;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
+ WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
+ WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
+
+ tmp = RREG32(ih_regs->ih_rb_cntl);
+ tmp = ih_v6_1_rb_cntl(ih, tmp);
+ if (ih == &adev->irq.ih)
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
+ if (ih == &adev->irq.ih1) {
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
+ }
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ if (ih == &adev->irq.ih) {
+ /* set the ih ring 0 writeback address whether it's enabled or not */
+ WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
+ WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
+ }
+
+ /* set rptr, wptr to 0 */
+ WREG32(ih_regs->ih_rb_wptr, 0);
+ WREG32(ih_regs->ih_rb_rptr, 0);
+
+ WREG32(ih_regs->ih_doorbell_rptr, ih_v6_1_doorbell_rptr(ih));
+
+ return 0;
+}
+
+/**
+ * ih_v6_1_irq_init - init and enable the interrupt ring
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate a ring buffer for the interrupt controller,
+ * enable the RLC, disable interrupts, enable the IH
+ * ring buffer and enable it.
+ * Called at device load and resume.
+ * Returns 0 for success, errors for failure.
+ */
+static int ih_v6_1_irq_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
+ u32 ih_chicken;
+ u32 tmp;
+ int ret;
+ int i;
+
+ /* disable irqs */
+ ret = ih_v6_1_toggle_interrupts(adev, false);
+ if (ret)
+ return ret;
+
+ adev->nbio.funcs->ih_control(adev);
+
+ if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
+ if (ih[0]->use_bus_addr) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
+ ih_chicken = REG_SET_FIELD(ih_chicken,
+ IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ih); i++) {
+ if (ih[i]->ring_size) {
+ ret = ih_v6_1_enable_ring(adev, ih[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* update doorbell range for ih ring 0 */
+ adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
+ ih[0]->doorbell_index);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
+ CLIENT18_IS_STORM_CLIENT, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
+
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
+ WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
+
+ /* GC/MMHUB UTCL2 page fault interrupts are configured as
+ * MSI storm capable interrupts by default. The delay is
+ * used to avoid the ISR being called too frequently
+ * when page faults happen on several contiguous pages,
+ * and thus avoid an MSI storm */
+ tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
+ tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
+ DELAY, 3);
+ WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
+
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+ ret = ih_v6_1_toggle_interrupts(adev, true);
+ if (ret)
+ return ret;
+ /* enable wptr force update for self int */
+ force_update_wptr_for_self_int(adev, 0, 8, true);
+
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
+ return 0;
+}
+
+/**
+ * ih_v6_1_irq_disable - disable interrupts
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Disable interrupts on the hw.
+ */
+static void ih_v6_1_irq_disable(struct amdgpu_device *adev)
+{
+ force_update_wptr_for_self_int(adev, 0, 8, false);
+ ih_v6_1_toggle_interrupts(adev, false);
+
+ /* Wait and acknowledge irq */
+ mdelay(1);
+}
+
+/**
+ * ih_v6_1_get_wptr - get the IH ring buffer wptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Get the IH ring buffer wptr from either the register
+ * or the writeback memory buffer. Also check for
+ * ring buffer overflow and deal with it.
+ * Returns the value of the wptr.
+ */
+static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ u32 wptr, tmp;
+ struct amdgpu_ih_regs *ih_regs;
+
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+ ih_regs = &ih->ih_regs;
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 32).
+ * Hopefully this should allow us to catch up.
+ */
+ tmp = (wptr + 32) & ih->ptr_mask;
+ dev_warn(adev->dev, "IH ring buffer overflow "
+ "(0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, tmp);
+ ih->rptr = tmp;
+
+ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+out:
+ return (wptr & ih->ptr_mask);
+}
+
+/**
+ * ih_v6_1_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ */
+static void ih_v6_1_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t v = 0;
+ uint32_t i = 0;
+ struct amdgpu_ih_regs *ih_regs;
+
+ ih_regs = &ih->ih_regs;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
+ * ih_v6_1_set_rptr - set the IH ring buffer rptr
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ *
+ * Set the IH ring buffer rptr.
+ */
+static void ih_v6_1_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ struct amdgpu_ih_regs *ih_regs;
+
+ if (ih->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ *ih->rptr_cpu = ih->rptr;
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ ih_v6_1_irq_rearm(adev, ih);
+ } else {
+ ih_regs = &ih->ih_regs;
+ WREG32(ih_regs->ih_rb_rptr, ih->rptr);
+ }
+}
+
+/**
+ * ih_v6_1_self_irq - dispatch work for ring 1
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int ih_v6_1_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs ih_v6_1_self_irq_funcs = {
+ .process = ih_v6_1_self_irq,
+};
+
+static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs;
+}
+
+static int ih_v6_1_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v6_1_set_interrupt_funcs(adev);
+ ih_v6_1_set_self_irq_funcs(adev);
+ return 0;
+}
+
+static int ih_v6_1_sw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool use_bus_addr;
+
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+
+ if (r)
+ return r;
+
+ /* use gpu virtual address for ih ring
+ * until IH_CHICKEN is programmed to allow
+ * use of the bus address for the ih ring by the psp bootloader */
+ use_bus_addr =
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+ if (r)
+ return r;
+
+ adev->irq.ih.use_doorbell = true;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+
+ adev->irq.ih1.ring_size = 0;
+ adev->irq.ih2.ring_size = 0;
+
+ /* initialize ih control register offset */
+ ih_v6_1_init_register_offset(adev);
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_init(adev);
+
+ return r;
+}
+
+static int ih_v6_1_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini_sw(adev);
+
+ return 0;
+}
+
+static int ih_v6_1_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = ih_v6_1_irq_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int ih_v6_1_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v6_1_irq_disable(adev);
+
+ return 0;
+}
+
+static int ih_v6_1_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ih_v6_1_hw_fini(adev);
+}
+
+static int ih_v6_1_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return ih_v6_1_hw_init(adev);
+}
+
+static bool ih_v6_1_is_idle(void *handle)
+{
+ /* todo */
+ return true;
+}
+
+static int ih_v6_1_wait_for_idle(void *handle)
+{
+ /* todo */
+ return -ETIMEDOUT;
+}
+
+static int ih_v6_1_soft_reset(void *handle)
+{
+ /* todo */
+ return 0;
+}
+
+static void ih_v6_1_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
+ }
+
+ return;
+}
+
+static int ih_v6_1_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ ih_v6_1_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE);
+ return 0;
+}
+
+static void ih_v6_1_update_ih_mem_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t ih_mem_pwr_cntl;
+
+ /* Disable ih sram power cntl before switching powergating mode */
+ ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 0);
+ WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
+
+ /* It is recommended to set mem powergating mode to DS mode */
+ if (enable) {
+ /* mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_DS_EN, 1);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_SD_EN, 0);
+ /* cam mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
+ /* re-enable power cntl */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 1);
+ } else {
+ /* mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_DS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_SD_EN, 0);
+ /* cam mem power mode */
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
+ /* re-enable power cntl*/
+ ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
+ IH_BUFFER_MEM_POWER_CTRL_EN, 1);
+ }
+
+ WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
+}
+
+static int ih_v6_1_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE);
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
+ ih_v6_1_update_ih_mem_power_gating(adev, enable);
+
+ return 0;
+}
+
+static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
+ *flags |= AMD_CG_SUPPORT_IH_CG;
+
+ return;
+}
+
+static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
+ .name = "ih_v6_1",
+ .early_init = ih_v6_1_early_init,
+ .late_init = NULL,
+ .sw_init = ih_v6_1_sw_init,
+ .sw_fini = ih_v6_1_sw_fini,
+ .hw_init = ih_v6_1_hw_init,
+ .hw_fini = ih_v6_1_hw_fini,
+ .suspend = ih_v6_1_suspend,
+ .resume = ih_v6_1_resume,
+ .is_idle = ih_v6_1_is_idle,
+ .wait_for_idle = ih_v6_1_wait_for_idle,
+ .soft_reset = ih_v6_1_soft_reset,
+ .set_clockgating_state = ih_v6_1_set_clockgating_state,
+ .set_powergating_state = ih_v6_1_set_powergating_state,
+ .get_clockgating_state = ih_v6_1_get_clockgating_state,
+};
+
+static const struct amdgpu_ih_funcs ih_v6_1_funcs = {
+ .get_wptr = ih_v6_1_get_wptr,
+ .decode_iv = amdgpu_ih_decode_iv_helper,
+ .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
+ .set_rptr = ih_v6_1_set_rptr
+};
+
+static void ih_v6_1_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.ih_funcs = &ih_v6_1_funcs;
+}
+
+const struct amdgpu_ip_block_version ih_v6_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ih_v6_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.h b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.h
new file mode 100644
index 000000000000..2232bc5cbd09
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IH_V6_1_IH_H__
+#define __IH_V6_1_IH_H__
+
+extern const struct amdgpu_ip_block_version ih_v6_1_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index a3076eb8af6a..77595e9622da 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -437,7 +437,7 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case 126:
- amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -460,6 +460,7 @@ int jpeg_v1_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_rings = 1;
jpeg_v1_0_set_dec_ring_funcs(adev);
jpeg_v1_0_set_irq_funcs(adev);
@@ -484,15 +485,15 @@ int jpeg_v1_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->jpeg.inst->ring_dec;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring = adev->jpeg.inst->ring_dec;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
- adev->jpeg.internal.jpeg_pitch = adev->jpeg.inst->external.jpeg_pitch =
+ adev->jpeg.internal.jpeg_pitch[0] = adev->jpeg.inst->external.jpeg_pitch[0] =
SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
return 0;
@@ -509,7 +510,7 @@ void jpeg_v1_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_ring_fini(&adev->jpeg.inst[0].ring_dec);
+ amdgpu_ring_fini(adev->jpeg.inst->ring_dec);
}
/**
@@ -522,7 +523,7 @@ void jpeg_v1_0_sw_fini(void *handle)
*/
void jpeg_v1_0_start(struct amdgpu_device *adev, int mode)
{
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
if (mode == 0) {
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
@@ -579,7 +580,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->ring_dec.funcs = &jpeg_v1_0_decode_ring_vm_funcs;
+ adev->jpeg.inst->ring_dec->funcs = &jpeg_v1_0_decode_ring_vm_funcs;
DRM_INFO("JPEG decode is enabled in VM mode\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 0eddf7c824a7..1c8116d75f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -49,6 +49,7 @@ static int jpeg_v2_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_rings = 1;
jpeg_v2_0_set_dec_ring_funcs(adev);
jpeg_v2_0_set_irq_funcs(adev);
@@ -83,18 +84,18 @@ static int jpeg_v2_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->jpeg.inst->ring_dec;
+ ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
- adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+ adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
return 0;
}
@@ -129,7 +130,7 @@ static int jpeg_v2_0_sw_fini(void *handle)
static int jpeg_v2_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -312,7 +313,7 @@ static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device *adev)
*/
static int jpeg_v2_0_start(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
@@ -729,7 +730,7 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case VCN_2_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -791,7 +792,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->ring_dec.funcs = &jpeg_v2_0_dec_ring_vm_funcs;
+ adev->jpeg.inst->ring_dec->funcs = &jpeg_v2_0_dec_ring_vm_funcs;
DRM_INFO("JPEG decode is enabled in VM mode\n");
}
@@ -806,8 +807,7 @@ static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs;
}
-const struct amdgpu_ip_block_version jpeg_v2_0_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 2,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index b040f51d9aa9..aadb74de52bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -60,6 +60,7 @@ static int jpeg_v2_5_early_init(void *handle)
u32 harvest;
int i;
+ adev->jpeg.num_jpeg_rings = 1;
adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
@@ -102,13 +103,13 @@ static int jpeg_v2_5_sw_init(void *handle)
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
- VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
+ VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
- VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
+ VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
if (r)
return r;
}
@@ -125,12 +126,12 @@ static int jpeg_v2_5_sw_init(void *handle)
if (adev->jpeg.harvest_config & (1 << i))
continue;
- ring = &adev->jpeg.inst[i].ring_dec;
+ ring = adev->jpeg.inst[i].ring_dec;
ring->use_doorbell = true;
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
- ring->vm_hub = AMDGPU_MMHUB_1;
+ ring->vm_hub = AMDGPU_MMHUB1(0);
else
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
@@ -138,8 +139,8 @@ static int jpeg_v2_5_sw_init(void *handle)
if (r)
return r;
- adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
+ adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH);
}
r = amdgpu_jpeg_ras_sw_init(adev);
@@ -186,7 +187,7 @@ static int jpeg_v2_5_hw_init(void *handle)
if (adev->jpeg.harvest_config & (1 << i))
continue;
- ring = &adev->jpeg.inst[i].ring_dec;
+ ring = adev->jpeg.inst[i].ring_dec;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);
@@ -221,6 +222,9 @@ static int jpeg_v2_5_hw_fini(void *handle)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
}
return 0;
@@ -326,7 +330,7 @@ static int jpeg_v2_5_start(struct amdgpu_device *adev)
if (adev->jpeg.harvest_config & (1 << i))
continue;
- ring = &adev->jpeg.inst[i].ring_dec;
+ ring = adev->jpeg.inst[i].ring_dec;
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
@@ -569,6 +573,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -591,11 +603,7 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case VCN_2_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
- break;
- case VCN_2_6__SRCID_DJPEG0_POISON:
- case VCN_2_6__SRCID_EJPEG0_POISON:
- amdgpu_jpeg_process_poison_irq(adev, source, entry);
+ amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -712,10 +720,10 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
if (adev->jpeg.harvest_config & (1 << i))
continue;
if (adev->asic_type == CHIP_ARCTURUS)
- adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_5_dec_ring_vm_funcs;
else /* CHIP_ALDEBARAN */
- adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
- adev->jpeg.inst[i].ring_dec.me = i;
+ adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v2_6_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec->me = i;
DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
}
}
@@ -725,6 +733,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
.process = jpeg_v2_5_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
+ .set = jpeg_v2_6_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -735,6 +748,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst[i].irq.num_types = 1;
adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+
+ adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
+ adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
}
}
@@ -800,6 +816,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
.ras_block = {
.hw_ops = &jpeg_v2_6_ras_hw_ops,
+ .ras_late_init = amdgpu_jpeg_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 1c2292cc5f2c..df4440c21bbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -64,6 +64,7 @@ static int jpeg_v3_0_early_init(void *handle)
}
adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_rings = 1;
jpeg_v3_0_set_dec_ring_funcs(adev);
jpeg_v3_0_set_irq_funcs(adev);
@@ -98,18 +99,18 @@ static int jpeg_v3_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->jpeg.inst->ring_dec;
+ ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
- adev->jpeg.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
+ adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
return 0;
}
@@ -144,7 +145,7 @@ static int jpeg_v3_0_sw_fini(void *handle)
static int jpeg_v3_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -330,7 +331,7 @@ static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
*/
static int jpeg_v3_0_start(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
@@ -478,7 +479,7 @@ static int jpeg_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v3_0_is_idle(handle))
@@ -527,7 +528,7 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case VCN_2_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
+ amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -589,7 +590,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->ring_dec.funcs = &jpeg_v3_0_dec_ring_vm_funcs;
+ adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs;
DRM_INFO("JPEG decode is enabled in VM mode\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 77e1e64aa1d1..3eb3dcd56b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -58,6 +58,7 @@ static int jpeg_v4_0_early_init(void *handle)
adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_rings = 1;
jpeg_v4_0_set_dec_ring_funcs(adev);
jpeg_v4_0_set_irq_funcs(adev);
@@ -87,13 +88,13 @@ static int jpeg_v4_0_sw_init(void *handle)
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
@@ -105,10 +106,10 @@ static int jpeg_v4_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->jpeg.inst->ring_dec;
+ ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = amdgpu_sriov_vf(adev) ? (((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -116,8 +117,8 @@ static int jpeg_v4_0_sw_init(void *handle)
if (r)
return r;
- adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
- adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
+ adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
r = amdgpu_jpeg_ras_sw_init(adev);
if (r)
@@ -156,7 +157,7 @@ static int jpeg_v4_0_sw_fini(void *handle)
static int jpeg_v4_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (amdgpu_sriov_vf(adev)) {
@@ -202,7 +203,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
- amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return 0;
}
@@ -363,7 +365,7 @@ static int jpeg_v4_0_enable_static_power_gating(struct amdgpu_device *adev)
*/
static int jpeg_v4_0_start(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
+ struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
@@ -441,7 +443,7 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
table_size = 0;
- ring = &adev->jpeg.inst->ring_dec;
+ ring = adev->jpeg.inst->ring_dec;
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
regUVD_LMI_JRBC_RB_64BIT_BAR_LOW),
@@ -624,7 +626,7 @@ static int jpeg_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v4_0_is_idle(handle))
@@ -670,6 +672,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -678,11 +688,7 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) {
case VCN_4_0__SRCID__JPEG_DECODE:
- amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
- break;
- case VCN_4_0__SRCID_DJPEG0_POISON:
- case VCN_4_0__SRCID_EJPEG0_POISON:
- amdgpu_jpeg_process_poison_irq(adev, source, entry);
+ amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
@@ -744,7 +750,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
- adev->jpeg.inst->ring_dec.funcs = &jpeg_v4_0_dec_ring_vm_funcs;
+ adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_dec_ring_vm_funcs;
DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}
@@ -753,10 +759,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
.process = jpeg_v4_0_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
+ .set = jpeg_v4_0_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
@@ -811,6 +825,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_ras_hw_ops,
+ .ras_late_init = amdgpu_jpeg_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
new file mode 100644
index 000000000000..15612915bb6c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -0,0 +1,1216 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v4_0_3.h"
+#include "mmsch_v4_0_3.h"
+
+#include "vcn/vcn_4_0_3_offset.h"
+#include "vcn/vcn_4_0_3_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+
+enum jpeg_engine_status {
+ UVD_PGFSM_STATUS__UVDJ_PWR_ON = 0,
+ UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2,
+};
+
+static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v4_0_3_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
+static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring);
+
+static int amdgpu_ih_srcid_jpeg[] = {
+ VCN_4_0__SRCID__JPEG_DECODE,
+ VCN_4_0__SRCID__JPEG1_DECODE,
+ VCN_4_0__SRCID__JPEG2_DECODE,
+ VCN_4_0__SRCID__JPEG3_DECODE,
+ VCN_4_0__SRCID__JPEG4_DECODE,
+ VCN_4_0__SRCID__JPEG5_DECODE,
+ VCN_4_0__SRCID__JPEG6_DECODE,
+ VCN_4_0__SRCID__JPEG7_DECODE
+};
+
+/**
+ * jpeg_v4_0_3_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v4_0_3_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+
+ jpeg_v4_0_3_set_dec_ring_funcs(adev);
+ jpeg_v4_0_3_set_irq_funcs(adev);
+ jpeg_v4_0_3_set_ras_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v4_0_3_sw_init - sw init for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v4_0_3_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->use_doorbell = true;
+ ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
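+ /* Doorbell layout: bare-metal packs 9 doorbells per JPEG instance,
+ * while SR-IOV reserves 32 per instance with the decode rings split
+ * into two groups of four.
+ */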
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 1 + j + 9 * jpeg_inst;
+ } else {
+ if (j < 4)
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 4 + j + 32 * jpeg_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 8 + j + 32 * jpeg_inst;
+ }
+ sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
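+ /* Ring 0 uses the JRBC0 register block directly; rings 1..7 use the
+ * adjacent JRBC blocks reached through the 0x40-register stride
+ * computed below (the same expression is reused throughout this file).
+ */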
+ adev->jpeg.internal.jpeg_pitch[j] =
+ regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch[j] =
+ SOC15_REG_OFFSET1(
+ JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_SCRATCH0,
+ (j ? (0x40 * j - 0xc80) : 0));
+ }
+ }
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v4_0_3_sw_fini - sw fini for JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v4_0_3_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw, item_offset;
+ uint32_t init_status;
+ int i, j, jpeg_inst;
+
+ struct mmsch_v4_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v4_0_cmd_end end = { {0} };
+ struct mmsch_v4_0_3_init_header header;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ item_offset = header.total_size;
+
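+ /* Build one direct-write table per ring (RB base low/high and size);
+ * rings 0-3 are described in mjpegdec0[], rings 4-7 in mjpegdec1[].
+ */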
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ table_size = 0;
+
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW);
+ MMSCH_V4_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
+ MMSCH_V4_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE);
+ MMSCH_V4_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
+
+ if (j <= 3) {
+ header.mjpegdec0[j].table_offset = item_offset;
+ header.mjpegdec0[j].init_status = 0;
+ header.mjpegdec0[j].table_size = table_size;
+ } else {
+ header.mjpegdec1[j - 4].table_offset = item_offset;
+ header.mjpegdec1[j - 4].init_status = 0;
+ header.mjpegdec1[j - 4].table_size = table_size;
+ }
+ header.total_size += table_size;
+ item_offset += table_size;
+ }
+
+ MMSCH_V4_0_INSERT_END();
+
+ /* send init table to MMSCH */
+ size = sizeof(struct mmsch_v4_0_3_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status =
+ ((struct mmsch_v4_0_3_init_header *)(table_loc))->mjpegdec0[i].init_status;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
+ init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
+ resp, init_status);
+
+ }
+ return 0;
+}
+
+/**
+ * jpeg_v4_0_3_hw_init - start and test JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ */
+static int jpeg_v4_0_3_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = jpeg_v4_0_3_start_sriov(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ jpeg_v4_0_3_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ } else {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ ring = adev->jpeg.inst[i].ring_dec;
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(
+ adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * jpeg_inst,
+ adev->jpeg.inst[i].aid_id);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->use_doorbell)
+ WREG32_SOC15_OFFSET(
+ VCN, GET_INST(VCN, i),
+ regVCN_JPEG_DB_CTRL,
+ (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->doorbell_index
+ << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+ }
+ }
+ DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
+
+ return 0;
+}
+
+/**
+ * jpeg_v4_0_3_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v4_0_3_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ return ret;
+}
+
+/**
+ * jpeg_v4_0_3_suspend - suspend JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v4_0_3_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = jpeg_v4_0_3_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v4_0_3_resume - resume JPEG block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v4_0_3_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v4_0_3_hw_init(adev);
+
+ return r;
+}
+
+static void jpeg_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+{
+ int i, jpeg_inst;
+ uint32_t data;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ data &= (~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1));
+ } else {
+ data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ }
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
+ data &= ~(JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
+ for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
+ data &= ~(JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
+}
+
+static void jpeg_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+{
+ int i, jpeg_inst;
+ uint32_t data;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL);
+ if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
+ data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ data |= (JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK << 1);
+ } else {
+ data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ }
+
+ data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_CTRL, data);
+
+ data = RREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE);
+ data |= (JPEG_CGC_GATE__JMCIF_MASK | JPEG_CGC_GATE__JRBBM_MASK);
+ for (i = 0; i < adev->jpeg.num_jpeg_rings; ++i)
+ data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK << i);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CGC_GATE, data);
+}
+
+/**
+ * jpeg_v4_0_3_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v4_0_3_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, j, jpeg_inst;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
+ 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(
+ JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
+ UVD_PGFSM_STATUS__UVDJ_PWR_ON
+ << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
+
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JPEG_POWER_STATUS),
+ 0, ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* JPEG disable CGC */
+ jpeg_v4_0_3_disable_clock_gating(adev, i);
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(JPEG, jpeg_inst, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
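+ /* Per ring: enable the JRBC system interrupt, then program VMID,
+ * ring buffer base and size, clear the read/write pointers and
+ * finally cache the hardware write pointer.
+ */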
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);
+
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+
+ /* enable System Interrupt for JRBC */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
+ regJPEG_SYS_INT_EN),
+ JPEG_SYS_INT_EN__DJRBC0_MASK << j,
+ ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j));
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(
+ JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(
+ JPEG, jpeg_inst,
+ regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC0_UVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(
+ JPEG, jpeg_inst, regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ reg_offset);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v4_0_3_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * stop the JPEG block
+ */
+static int jpeg_v4_0_3_stop(struct amdgpu_device *adev)
+{
+ int i, jpeg_inst;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ jpeg_v4_0_3_enable_clock_gating(adev, i);
+
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst,
+ regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ WREG32_SOC15(JPEG, jpeg_inst, regUVD_PGFSM_CONFIG,
+ 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT);
+ SOC15_WAIT_ON_RREG(
+ JPEG, jpeg_inst, regUVD_PGFSM_STATUS,
+ UVD_PGFSM_STATUS__UVDJ_PWR_OFF
+ << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT,
+ UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v4_0_3_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15_OFFSET(
+ JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC0_UVD_JRBC_RB_RPTR,
+ ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+ else
+ return RREG32_SOC15_OFFSET(
+ JPEG, GET_INST(JPEG, ring->me),
+ regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
+ regUVD_JRBC0_UVD_JRBC_RB_WPTR,
+ (ring->pipe ? (0x40 * ring->pipe - 0xc80) :
+ 0),
+ lower_32_bits(ring->wptr));
+ }
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80004000);
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x62a04);
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00004000);
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned int flags)
+{
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, seq);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+ amdgpu_ring_write(ring, 0);
+
+ if (ring->adev->jpeg.inst[ring->me].aid_id) {
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x4);
+ } else {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x3fbc);
+
+ if (ring->adev->jpeg.inst[ring->me].aid_id) {
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x0);
+ } else {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x1);
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * jpeg_v4_0_3_dec_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @job: job to retrieve vmid from
+ * @ib: indirect buffer to execute
+ * @flags: unused
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned int vmid = AMDGPU_JOB_GET_VMID(job);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, ib->length_dw);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+ amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x2);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_STATUS_INTERNAL_OFFSET,
+ 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+ amdgpu_ring_write(ring, 0x2);
+}
+
+static void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x01400200);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, val);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
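+ /* Registers inside the 0x10000-0x105ff window are addressed directly
+ * in the packet; anything else goes through the external register
+ * write address.
+ */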
+ if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE3));
+ }
+ amdgpu_ring_write(ring, mask);
+}
+
+static void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
+{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+ uint32_t data0, data1, mask;
+
+ pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+ /* wait for register write */
+ data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ jpeg_v4_0_3_dec_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+ uint32_t reg_offset = (reg << 2);
+
+ amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring,
+ PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+ } else {
+ amdgpu_ring_write(ring, reg_offset);
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ }
+ amdgpu_ring_write(ring, val);
+}
+
+static void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
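+ /* NOPs are emitted as two-dword TYPE6 packets, so both the write
+ * pointer and the requested count must stay even.
+ */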
+ WARN_ON(ring->wptr % 2 || count % 2);
+
+ for (i = 0; i < count / 2; i++) {
+ amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static bool jpeg_v4_0_3_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool ret = true;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);
+
+ ret &= ((RREG32_SOC15_OFFSET(
+ JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC0_UVD_JRBC_STATUS,
+ reg_offset) &
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
+ }
+
+ return ret;
+}
+
+static int jpeg_v4_0_3_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ unsigned int reg_offset = (j ? (0x40 * j - 0xc80) : 0);
+
+ ret &= SOC15_WAIT_ON_RREG_OFFSET(
+ JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC0_UVD_JRBC_STATUS, reg_offset,
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
+ }
+ return ret;
+}
+
+static int jpeg_v4_0_3_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = state == AMD_CG_STATE_GATE;
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (enable) {
+ if (!jpeg_v4_0_3_is_idle(handle))
+ return -EBUSY;
+ jpeg_v4_0_3_enable_clock_gating(adev, i);
+ } else {
+ jpeg_v4_0_3_disable_clock_gating(adev, i);
+ }
+ }
+ return 0;
+}
+
+static int jpeg_v4_0_3_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v4_0_3_stop(adev);
+ else
+ ret = jpeg_v4_0_3_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t i, inst;
+
+ i = node_id_to_phys_map[entry->node_id];
+ DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
+ if (adev->jpeg.inst[inst].aid_id == i)
+ break;
+
+ if (inst >= adev->jpeg.num_jpeg_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown JPEG instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_4_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
+ break;
+ case VCN_4_0__SRCID__JPEG1_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
+ break;
+ case VCN_4_0__SRCID__JPEG2_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
+ break;
+ case VCN_4_0__SRCID__JPEG3_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
+ break;
+ case VCN_4_0__SRCID__JPEG4_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
+ break;
+ case VCN_4_0__SRCID__JPEG5_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
+ break;
+ case VCN_4_0__SRCID__JPEG6_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
+ break;
+ case VCN_4_0__SRCID__JPEG7_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
+ .name = "jpeg_v4_0_3",
+ .early_init = jpeg_v4_0_3_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v4_0_3_sw_init,
+ .sw_fini = jpeg_v4_0_3_sw_fini,
+ .hw_init = jpeg_v4_0_3_hw_init,
+ .hw_fini = jpeg_v4_0_3_hw_fini,
+ .suspend = jpeg_v4_0_3_suspend,
+ .resume = jpeg_v4_0_3_resume,
+ .is_idle = jpeg_v4_0_3_is_idle,
+ .wait_for_idle = jpeg_v4_0_3_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
+ .set_powergating_state = jpeg_v4_0_3_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
+ .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
+ .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */
+ 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */
+ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
+ .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v4_0_3_dec_ring_nop,
+ .insert_start = jpeg_v4_0_3_dec_ring_insert_start,
+ .insert_end = jpeg_v4_0_3_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j, jpeg_inst;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v4_0_3_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec[j].me = i;
+ adev->jpeg.inst[i].ring_dec[j].pipe = j;
+ }
+ jpeg_inst = GET_INST(JPEG, i);
+ adev->jpeg.inst[i].aid_id =
+ jpeg_inst / adev->jpeg.num_inst_per_aid;
+ }
+ DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
+ .set = jpeg_v4_0_3_set_interrupt_state,
+ .process = jpeg_v4_0_3_process_interrupt,
+};
+
+static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
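+ /* A single irq source (on the first instance) services every
+ * instance, so num_types accumulates one trap per decode ring across
+ * all instances.
+ */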
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
+ }
+ adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 4,
+ .minor = 0,
+ .rev = 3,
+ .funcs = &jpeg_v4_0_3_ip_funcs,
+};
+
+static const struct amdgpu_ras_err_status_reg_entry jpeg_v4_0_3_ue_reg_list[] = {
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0S, regVCN_UE_ERR_STATUS_HI_JPEG0S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG0D, regVCN_UE_ERR_STATUS_HI_JPEG0D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG0D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1S, regVCN_UE_ERR_STATUS_HI_JPEG1S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG1D, regVCN_UE_ERR_STATUS_HI_JPEG1D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG1D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2S, regVCN_UE_ERR_STATUS_HI_JPEG2S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG2D, regVCN_UE_ERR_STATUS_HI_JPEG2D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG2D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3S, regVCN_UE_ERR_STATUS_HI_JPEG3S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG3D, regVCN_UE_ERR_STATUS_HI_JPEG3D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG3D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4S, regVCN_UE_ERR_STATUS_HI_JPEG4S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG4D, regVCN_UE_ERR_STATUS_HI_JPEG4D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG4D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5S, regVCN_UE_ERR_STATUS_HI_JPEG5S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG5D, regVCN_UE_ERR_STATUS_HI_JPEG5D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG5D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6S, regVCN_UE_ERR_STATUS_HI_JPEG6S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG6D, regVCN_UE_ERR_STATUS_HI_JPEG6D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG6D"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7S, regVCN_UE_ERR_STATUS_HI_JPEG7S),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7S"},
+ {AMDGPU_RAS_REG_ENTRY(JPEG, 0, regVCN_UE_ERR_STATUS_LO_JPEG7D, regVCN_UE_ERR_STATUS_HI_JPEG7D),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "JPEG7D"},
+};
+
+static void jpeg_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t jpeg_inst,
+ void *ras_err_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
+
+ /* jpeg v4_0_3 only support uncorrectable errors */
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ jpeg_v4_0_3_ue_reg_list,
+ ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
+ NULL, 0, GET_INST(VCN, jpeg_inst),
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ &err_data->ue_count);
+}
+
+static void jpeg_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_err_status)
+{
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ dev_warn(adev->dev, "JPEG RAS is not supported\n");
+ return;
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
+ jpeg_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
+}
+
+static void jpeg_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ uint32_t jpeg_inst)
+{
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ jpeg_v4_0_3_ue_reg_list,
+ ARRAY_SIZE(jpeg_v4_0_3_ue_reg_list),
+ GET_INST(VCN, jpeg_inst));
+}
+
+static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ dev_warn(adev->dev, "JPEG RAS is not supported\n");
+ return;
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++)
+ jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
+}
+
+static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
+ .query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
+ .reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
+};
+
+static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
+ .ras_block = {
+ .hw_ops = &jpeg_v4_0_3_ras_hw_ops,
+ },
+};
+
+static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.ras = &jpeg_v4_0_3_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
new file mode 100644
index 000000000000..22483dc66351
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V4_0_3_H__
+#define __JPEG_V4_0_3_H__
+
+#define regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
+#define regUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x404d
+#define regUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x404e
+#define regUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x404f
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ab
+#define regUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ac
+#define regUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40a4
+#define regUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40a6
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40b6
+#define regUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40b7
+#define regUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
+#define regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x42d4
+#define regUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x42d5
+#define regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
+#define regUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
+#define regUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
+#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x4043
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094
+#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
+extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
+
+#endif /* __JPEG_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 2e2062636d5f..eb06d749876f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -149,7 +149,7 @@ static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
{
struct amdgpu_device *adev = mes->adev;
union MESAPI__ADD_QUEUE mes_add_queue_pkt;
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;
memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
@@ -632,6 +632,8 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
+ memset(mqd, 0, sizeof(*mqd));
+
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
@@ -728,6 +730,7 @@ static int mes_v10_1_mqd_init(struct amdgpu_ring *ring)
/* offset: 184 - this is used for CP_HQD_GFX_CONTROL */
mqd->cp_hqd_suspend_cntl_stack_offset = tmp;
+ amdgpu_device_flush_hdp(ring->adev, NULL);
return 0;
}
@@ -797,8 +800,8 @@ static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring)
static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@@ -812,13 +815,7 @@ static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("kfq enable failed\n");
- kiq_ring->sched.ready = false;
- }
-
- return r;
+ return amdgpu_ring_test_helper(kiq_ring);
}
static int mes_v10_1_queue_init(struct amdgpu_device *adev)
@@ -863,9 +860,9 @@ static int mes_v10_1_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- spin_lock_init(&adev->gfx.kiq.ring_lock);
+ spin_lock_init(&adev->gfx.kiq[0].ring_lock);
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
@@ -891,7 +888,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@@ -901,6 +898,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
return 0;
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
@@ -911,10 +909,12 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
/* prepare MQD backup */
adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->mes.mqd_backup[pipe])
+ if (!adev->mes.mqd_backup[pipe]) {
dev_warn(adev->dev,
"no memory to create MQD backup for ring %s\n",
ring->name);
+ return -ENOMEM;
+ }
return 0;
}
@@ -974,15 +974,15 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
- amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
- &adev->gfx.kiq.ring.mqd_gpu_addr,
- &adev->gfx.kiq.ring.mqd_ptr);
+ amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+ &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+ &adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
- amdgpu_ring_fini(&adev->gfx.kiq.ring);
+ amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
amdgpu_mes_fini(adev);
@@ -1038,7 +1038,7 @@ static int mes_v10_1_kiq_hw_init(struct amdgpu_device *adev)
mes_v10_1_enable(adev, true);
- mes_v10_1_kiq_setting(&adev->gfx.kiq.ring);
+ mes_v10_1_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v10_1_queue_init(adev);
if (r)
@@ -1090,7 +1090,7 @@ static int mes_v10_1_hw_init(void *handle)
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 45280f047180..6827d547042e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -164,7 +164,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
{
struct amdgpu_device *adev = mes->adev;
union MESAPI__ADD_QUEUE mes_add_queue_pkt;
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;
memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
@@ -202,20 +202,15 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gws_size = input->gws_size;
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
+ mes_add_queue_pkt.trap_en = input->trap_en;
+ mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
mes_add_queue_pkt.gds_size = input->queue_size;
- if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) &&
- (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) &&
- (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3))))
- mes_add_queue_pkt.trap_en = 1;
-
- /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
- mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
- mes_add_queue_pkt.gds_size = input->queue_size;
+ mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
@@ -339,6 +334,19 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
break;
+ case MES_MISC_OP_SET_SHADER_DEBUGGER:
+ misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
+ misc_pkt.set_shader_debugger.process_context_addr =
+ input->set_shader_debugger.process_context_addr;
+ misc_pkt.set_shader_debugger.flags.u32all =
+ input->set_shader_debugger.flags.u32all;
+ misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
+ input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
+ memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
+ input->set_shader_debugger.tcp_watch_cntl,
+ sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
+ misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
+ break;
default:
DRM_ERROR("unsupported misc op (%d) \n", input->op);
return -EINVAL;
@@ -704,6 +712,8 @@ static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
+ memset(mqd, 0, sizeof(*mqd));
+
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
@@ -778,8 +788,7 @@ static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
- }
- else
+ } else
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -797,6 +806,7 @@ static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;
+ amdgpu_device_flush_hdp(ring->adev, NULL);
return 0;
}
@@ -864,8 +874,8 @@ static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@@ -879,12 +889,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("kfq enable failed\n");
- kiq_ring->sched.ready = false;
- }
- return r;
+ return amdgpu_ring_test_helper(kiq_ring);
}
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
@@ -894,7 +899,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
int r;
if (pipe == AMDGPU_MES_KIQ_PIPE)
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@@ -961,9 +966,9 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- spin_lock_init(&adev->gfx.kiq.ring_lock);
+ spin_lock_init(&adev->gfx.kiq[0].ring_lock);
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
@@ -989,7 +994,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@@ -999,6 +1004,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
return 0;
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
@@ -1010,10 +1016,12 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
/* prepare MQD backup */
adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->mes.mqd_backup[pipe])
+ if (!adev->mes.mqd_backup[pipe]) {
dev_warn(adev->dev,
"no memory to create MQD backup for ring %s\n",
ring->name);
+ return -ENOMEM;
+ }
return 0;
}
@@ -1074,15 +1082,15 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
- amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
- &adev->gfx.kiq.ring.mqd_gpu_addr,
- &adev->gfx.kiq.ring.mqd_ptr);
+ amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+ &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+ &adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
- amdgpu_ring_fini(&adev->gfx.kiq.ring);
+ amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@@ -1175,7 +1183,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_enable(adev, true);
- mes_v11_0_kiq_setting(&adev->gfx.kiq.ring);
+ mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
@@ -1196,7 +1204,7 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev)) {
- mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring);
+ mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
mes_v11_0_kiq_clear(adev);
}
@@ -1244,7 +1252,7 @@ static int mes_v11_0_hw_init(void *handle)
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;
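
The open-coded KIQ ring test removed from both mes_v10_1.c and mes_v11_0.c above is replaced by amdgpu_ring_test_helper(), a pre-existing helper in amdgpu_ring.c that already folds the error print and the sched.ready update into one call, roughly (exact log strings may differ):

int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	/* Key point: sched.ready now tracks the test result in one place */
	ring->sched.ready = !r;
	return r;
}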
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 15e7cbeae75b..fb91b31056ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -54,7 +54,7 @@ static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -229,7 +229,7 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned num_level, block_size;
uint32_t tmp;
int i;
@@ -285,7 +285,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -338,7 +338,7 @@ static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -415,7 +415,7 @@ static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool
static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 73afbf2facc9..9086f2fdfaf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -54,7 +54,7 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
@@ -261,7 +261,7 @@ static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned num_level, block_size;
uint32_t tmp;
int i;
@@ -319,7 +319,7 @@ static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -348,7 +348,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -425,7 +425,7 @@ static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool
static void mmhub_v1_7_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 342d1702104c..784c4e077470 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -29,6 +29,7 @@
#include "soc15_common.h"
#include "soc15.h"
+#include "amdgpu_ras.h"
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
@@ -53,18 +54,30 @@ static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
-
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
+ struct amdgpu_vmhub *hub;
+ u32 inst_mask;
+ int i;
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+ }
}
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
uint64_t pt_base;
+ u32 inst_mask;
+ int i;
if (adev->gmc.pdb0_bo)
pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
@@ -76,187 +89,248 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
/* If use GART for FB translation, vmid0 page table covers both
* vram and system memory (gart)
*/
- if (adev->gmc.pdb0_bo) {
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
-
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
-
- } else {
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.gart_start >> 12));
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.gart_start >> 44));
-
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+
+ } else {
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
}
}
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
+ uint32_t tmp, inst_mask;
uint64_t value;
- uint32_t tmp;
+ int i;
- /* Program the AGP BAR */
- WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
+ adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
+ adev->gmc.agp_end >> 24);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+ if (amdgpu_sriov_vf(adev))
+ return;
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- /* In the case squeezing vram into GART aperture, we don't use
- * FB aperture and AGP aperture. Disable them.
- */
- if (adev->gmc.pdb0_bo) {
- WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
- }
- if (amdgpu_sriov_vf(adev))
- return;
+ WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
- /* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
-
- /* Program "protection fault". */
- WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page_addr >> 44));
-
- tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
- WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+ /* In the case squeezing vram into GART aperture, we don't use
+ * FB aperture and AGP aperture. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
+ 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, i,
+ regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ 0x3FFFFFFF);
+ WREG32_SOC15(MMHUB, i,
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
+
+ /* Set default page address. */
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
+ WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+ }
}
static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
{
- uint32_t tmp;
+ uint32_t tmp, inst_mask;
+ int i;
/* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
-
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 1);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- MTYPE, MTYPE_UC);/* XXX for emulation. */
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
-
- WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp;
+ uint32_t tmp, inst_mask;
+ int i;
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
- tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
- /* XXX for emulation, Refer to closed source code.*/
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
- 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
-
- tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp);
-
- tmp = regVM_L2_CNTL3_DEFAULT;
- if (adev->gmc.translate_further) {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
- L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
- } else {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
- L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
- }
- WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp);
-
- tmp = regVM_L2_CNTL4_DEFAULT;
- if (adev->gmc.xgmi.connected_to_cpu) {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
- VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
- VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
- VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
- VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
+ ENABLE_L2_FRAGMENT_PROCESSING, 1);
+ /* XXX for emulation, Refer to closed source code.*/
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
+ L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
+ CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
+ IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ /* For AMD APP APUs setup WC memory */
+ if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
}
- WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp);
}
static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
{
- uint32_t tmp;
-
- tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
- adev->gmc.vmid0_page_table_depth);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
- adev->gmc.vmid0_page_table_block_size);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
- WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp);
+ uint32_t tmp, inst_mask;
+ int i;
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp,
+ VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
+ }
}
static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
{
+ u32 inst_mask;
+ int i;
+
if (amdgpu_sriov_vf(adev))
return;
- WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, 0x0000000F);
-
- WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
-
- WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0XFFFFFFFF);
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(MMHUB, i,
+ regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+ }
}
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
- unsigned num_level, block_size;
- uint32_t tmp;
- int i;
+ struct amdgpu_vmhub *hub;
+ unsigned int num_level, block_size;
+ uint32_t tmp, inst_mask;
+ int i, j;
num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
@@ -265,77 +339,80 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
else
block_size -= 9;
- for (i = 0; i <= 14; i++) {
- tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
- num_level);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
- 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- block_size);
- /* On Aldebaran, XNACK can be enabled in the SQ per-process.
- * Retry faults need to be enabled for that to work.
- */
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- 1);
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL,
- i * hub->ctx_distance, tmp);
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
- i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
- i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
- i * hub->ctx_addr_distance,
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
- i * hub->ctx_addr_distance,
- upper_32_bits(adev->vm_manager.max_pfn - 1));
+ inst_mask = adev->aid_mask;
+ for_each_inst(j, inst_mask) {
+ hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
+ i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_DEPTH, num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* On 9.4.3, XNACK can be enabled in the SQ
+ * per-process. Retry faults need to be enabled for
+ * that to work.
+ */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
+ WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(MMHUB, j,
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, j,
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, j,
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(MMHUB, j,
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
}
}
static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
- unsigned i;
-
- for (i = 0; i < 18; ++i) {
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
- i * hub->eng_addr_distance, 0xffffffff);
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
- i * hub->eng_addr_distance, 0x1f);
+ struct amdgpu_vmhub *hub;
+ u32 i, j, inst_mask;
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(j, inst_mask) {
+ hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
+ for (i = 0; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(MMHUB, j,
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(MMHUB, j,
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
}
}
static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v1_8_init_gart_aperture_regs(adev);
mmhub_v1_8_init_system_aperture_regs(adev);
@@ -352,28 +429,34 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub;
u32 tmp;
- u32 i;
+ u32 i, j, inst_mask;
/* Disable all tables */
- for (i = 0; i < 16; i++)
- WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL,
- i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
- if (!amdgpu_sriov_vf(adev)) {
- /* Setup L2 cache */
- tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
- WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0);
+ inst_mask = adev->aid_mask;
+ for_each_inst(j, inst_mask) {
+ hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
+ 0);
+ WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
+ WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
+ }
}
}
@@ -385,73 +468,83 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
*/
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
- u32 tmp;
+ u32 tmp, inst_mask;
+ int i;
if (amdgpu_sriov_vf(adev))
return;
- tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
- value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- if (!value) {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- CRASH_ON_NO_RETRY_FAULT, 1);
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- CRASH_ON_RETRY_FAULT, 1);
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
+ WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-
- WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
static void mmhub_v1_8_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
-
- hub->ctx0_ptb_addr_lo32 =
- SOC15_REG_OFFSET(MMHUB, 0,
- regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
- hub->ctx0_ptb_addr_hi32 =
- SOC15_REG_OFFSET(MMHUB, 0,
- regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
- hub->vm_inv_eng0_req =
- SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ);
- hub->vm_inv_eng0_ack =
- SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK);
- hub->vm_context0_cntl =
- SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL);
- hub->vm_l2_pro_fault_status =
- SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS);
- hub->vm_l2_pro_fault_cntl =
- SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
-
- hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
- hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
- regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
- hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
- regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+ struct amdgpu_vmhub *hub;
+ u32 inst_mask;
+ int i;
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
+
+ hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
+ regVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
+ regVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance =
+ regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
+ regVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+ }
}
static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
@@ -475,3 +568,277 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
.set_clockgating = mmhub_v1_8_set_clockgating,
.get_clockgating = mmhub_v1_8_get_clockgating,
};
+
+static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI),
+ 1, 0, "MM_CANE"},
+};
+
+static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = {
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
+ {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI),
+ 1, 0, "MM_CANE"},
+};
+
+static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = {
+ {AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"},
+ {AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"},
+ {AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"},
+ {AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"},
+ {AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"},
+ {AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"},
+ {AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"},
+ {AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"},
+ {AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"},
+ {AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"},
+ {AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"},
+ {AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"},
+ {AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"},
+ {AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"},
+ {AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"},
+ {AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"},
+ {AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"},
+ {AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"},
+ {AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"},
+};
+
+static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t mmhub_inst,
+ void *ras_err_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
+
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ mmhub_v1_8_ce_reg_list,
+ ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
+ mmhub_v1_8_ras_memory_list,
+ ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
+ mmhub_inst,
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
+ &err_data->ce_count);
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ mmhub_v1_8_ue_reg_list,
+ ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
+ mmhub_v1_8_ras_memory_list,
+ ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
+ mmhub_inst,
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ &err_data->ue_count);
+}
+
+static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_err_status)
+{
+ uint32_t inst_mask;
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ dev_warn(adev->dev, "MMHUB RAS is not supported\n");
+ return;
+ }
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask)
+ mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status);
+}
+
+static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ uint32_t mmhub_inst)
+{
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ mmhub_v1_8_ce_reg_list,
+ ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
+ mmhub_inst);
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ mmhub_v1_8_ue_reg_list,
+ ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
+ mmhub_inst);
+}
+
+static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t inst_mask;
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ dev_warn(adev->dev, "MMHUB RAS is not supported\n");
+ return;
+ }
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask)
+ mmhub_v1_8_inst_reset_ras_error_count(adev, i);
+}
+
+static const u32 mmhub_v1_8_mmea_err_status_reg[] __maybe_unused = {
+ regMMEA0_ERR_STATUS,
+ regMMEA1_ERR_STATUS,
+ regMMEA2_ERR_STATUS,
+ regMMEA3_ERR_STATUS,
+ regMMEA4_ERR_STATUS,
+};
+
+static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev,
+ uint32_t mmhub_inst)
+{
+ uint32_t reg_value;
+ uint32_t mmea_err_status_addr_dist;
+ uint32_t i;
+
+ /* query mmea ras err status */
+ mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
+ reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_ERR_STATUS,
+ i * mmea_err_status_addr_dist);
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+ dev_warn(adev->dev,
+ "Detected MMEA%d err in MMHUB%d, status: 0x%x\n",
+ i, mmhub_inst, reg_value);
+ }
+ }
+
+ /* query mm_cane ras err status */
+ reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
+ if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) {
+ dev_warn(adev->dev,
+ "Detected MM CANE err in MMHUB%d, status: 0x%x\n",
+ mmhub_inst, reg_value);
+ }
+}
+
+static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev)
+{
+ uint32_t inst_mask;
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ dev_warn(adev->dev, "MMHUB RAS is not supported\n");
+ return;
+ }
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask)
+ mmhub_v1_8_inst_query_ras_err_status(adev, i);
+}
+
+static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev,
+ uint32_t mmhub_inst)
+{
+ uint32_t mmea_cgtt_clk_cntl_addr_dist;
+ uint32_t mmea_err_status_addr_dist;
+ uint32_t reg_value;
+ uint32_t i;
+
+ /* reset mmea ras err status */
+ mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL;
+ mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
+ /* force clk branch on for response path
+ * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1
+ */
+ reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_CGTT_CLK_CTRL,
+ i * mmea_cgtt_clk_cntl_addr_dist);
+ reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
+ SOFT_OVERRIDE_RETURN, 1);
+ WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_CGTT_CLK_CTRL,
+ i * mmea_cgtt_clk_cntl_addr_dist,
+ reg_value);
+
+ /* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
+ reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_ERR_STATUS,
+ i * mmea_err_status_addr_dist);
+ reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS,
+ CLEAR_ERROR_STATUS, 1);
+ WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_ERR_STATUS,
+ i * mmea_err_status_addr_dist,
+ reg_value);
+
+ /* set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0 */
+ reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_CGTT_CLK_CTRL,
+ i * mmea_cgtt_clk_cntl_addr_dist);
+ reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
+ SOFT_OVERRIDE_RETURN, 0);
+ WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
+ regMMEA0_CGTT_CLK_CTRL,
+ i * mmea_cgtt_clk_cntl_addr_dist,
+ reg_value);
+ }
+
+ /* reset mm_cane ras err status
+ * force clk branch on for response path
+ * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1
+ */
+ reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
+ reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
+ SOFT_OVERRIDE_ATRET, 1);
+ WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);
+
+ /* set MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
+ reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
+ reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS,
+ CLEAR_ERROR_STATUS, 1);
+ WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value);
+
+ /* set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0 */
+ reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
+ reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
+ SOFT_OVERRIDE_ATRET, 0);
+ WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);
+}
+
+static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev)
+{
+ uint32_t inst_mask;
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ dev_warn(adev->dev, "MMHUB RAS is not supported\n");
+ return;
+ }
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask)
+ mmhub_v1_8_inst_reset_ras_err_status(adev, i);
+}
+
+static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
+ .query_ras_error_count = mmhub_v1_8_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
+ .query_ras_error_status = mmhub_v1_8_query_ras_error_status,
+ .reset_ras_error_status = mmhub_v1_8_reset_ras_error_status,
+};
+
+struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
+ .ras_block = {
+ .hw_ops = &mmhub_v1_8_ras_hw_ops,
+ },
+};
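
The mmhub_v1_8.c conversion above repeats one idiom: every register sequence that used to target MMHUB instance 0 now loops over adev->aid_mask, with for_each_inst() visiting each set bit and the matching vmhub addressed via AMDGPU_MMHUB0(i). Distilled to its smallest form (the helper name below is illustrative only, assuming for_each_inst() walks the set bits of the mask as its uses above imply):

static void example_setup_vm_pt_regs_per_aid(struct amdgpu_device *adev,
					     uint32_t vmid, uint64_t pt_base)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask = adev->aid_mask;		/* one bit per present AID */
	int i;

	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		/* the second argument of WREG32_SOC15_OFFSET selects the MMHUB instance */
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(pt_base));
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(pt_base));
	}
}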
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h
index 0bb36200e4e5..126f0075ac50 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h
@@ -24,5 +24,6 @@
#define __MMHUB_V1_8_H__
extern const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v1_8_ras;
#endif
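
mmhub_v1_8_ras only becomes active once the GMC code points adev->mmhub.ras at it; that hookup lives outside this diff. A plausible shape for it, with the function name and the exact IP-version check being assumptions, is:

static void gmc_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(1, 8, 0):	/* assumed MMHUB version for this part */
		adev->mmhub.ras = &mmhub_v1_8_ras;
		break;
	default:
		break;
	}
}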
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 278e32db878d..8f76c6ecf50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -187,7 +187,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -362,7 +362,7 @@ static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
int i;
uint32_t tmp;
@@ -412,7 +412,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -441,7 +441,7 @@ static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -520,7 +520,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = {
static void mmhub_v2_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index fcf2813e70db..1dce053a4c4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -121,7 +121,7 @@ static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev,
uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
@@ -280,7 +280,7 @@ static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
int i;
uint32_t tmp;
@@ -330,8 +330,8 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
- unsigned i;
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
+ unsigned int i;
for (i = 0; i < 18; ++i) {
WREG32_SOC15_OFFSET(MMHUB, 0,
@@ -373,7 +373,7 @@ static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev)
static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -406,6 +406,7 @@ static void mmhub_v2_3_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
+
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -446,7 +447,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = {
static void mmhub_v2_3_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
@@ -499,11 +500,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
@@ -593,13 +594,13 @@ static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u64 *flags)
/* AMD_CG_SUPPORT_MC_MGCG */
if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
- DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
&& !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
- *flags |= AMD_CG_SUPPORT_MC_MGCG;
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
}
/* AMD_CG_SUPPORT_MC_LS */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 17a792616979..441379e91cfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -136,7 +136,7 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
static void mmhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -319,7 +319,7 @@ static void mmhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
int i;
uint32_t tmp;
@@ -369,7 +369,7 @@ static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v3_0_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -398,7 +398,7 @@ static int mmhub_v3_0_gart_enable(struct amdgpu_device *adev)
static void mmhub_v3_0_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -477,7 +477,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_vmhub_funcs = {
static void mmhub_v3_0_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 26509b6b8c24..12c7f4b46ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -138,7 +138,7 @@ static void mmhub_v3_0_1_setup_vm_pt_regs(struct amdgpu_device *adev,
uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -306,7 +306,7 @@ static void mmhub_v3_0_1_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
int i;
uint32_t tmp;
@@ -356,7 +356,7 @@ static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v3_0_1_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -385,7 +385,7 @@ static int mmhub_v3_0_1_gart_enable(struct amdgpu_device *adev)
static void mmhub_v3_0_1_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -459,7 +459,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_1_vmhub_funcs = {
static void mmhub_v3_0_1_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
index 26abbc6a47ab..5dadc85abf7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
@@ -129,7 +129,7 @@ mmhub_v3_0_2_print_l2_protection_fault_status(struct amdgpu_device *adev,
static void mmhub_v3_0_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
@@ -311,7 +311,7 @@ static void mmhub_v3_0_2_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
int i;
uint32_t tmp;
@@ -361,7 +361,7 @@ static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev)
static void mmhub_v3_0_2_program_invalidation(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -390,7 +390,7 @@ static int mmhub_v3_0_2_gart_enable(struct amdgpu_device *adev)
static void mmhub_v3_0_2_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i;
@@ -469,7 +469,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_2_vmhub_funcs = {
static void mmhub_v3_0_2_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 72083e96222f..5718e4d40e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -57,7 +57,7 @@ static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
uint32_t vmid, uint64_t value)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
@@ -108,7 +108,7 @@ static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
}
static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
- int hubid)
+ int hubid)
{
uint64_t value;
uint32_t tmp;
@@ -294,7 +294,7 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned int num_level, block_size;
uint32_t tmp;
int i;
@@ -363,7 +363,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
int hubid)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
@@ -404,7 +404,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i, j;
@@ -507,8 +507,8 @@ static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool
static void mmhub_v9_4_init(struct amdgpu_device *adev)
{
- struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
- {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
+ struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = {
+ &adev->vmhub[AMDGPU_MMHUB0(0)], &adev->vmhub[AMDGPU_MMHUB1(0)]};
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
@@ -1568,7 +1568,7 @@ static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
uint32_t sec_cnt, ded_cnt;
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
- if(mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
+ if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
continue;
sec_cnt = (value &
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h
index 3e4e858a6965..a773ef61b78c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h
@@ -30,6 +30,8 @@
#define MMSCH_VERSION_MINOR 0
#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+#define MMSCH_V3_0_VCN_INSTANCES 0x2
+
enum mmsch_v3_0_command_type {
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
@@ -47,7 +49,7 @@ struct mmsch_v3_0_table_info {
struct mmsch_v3_0_init_header {
uint32_t version;
uint32_t total_size;
- struct mmsch_v3_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct mmsch_v3_0_table_info inst[MMSCH_V3_0_VCN_INSTANCES];
};
struct mmsch_v3_0_cmd_direct_reg_header {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
index 83653a50a1a2..796d4f8791e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
@@ -43,6 +43,8 @@
#define MMSCH_VF_MAILBOX_RESP__OK 0x1
#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_V4_0_VCN_INSTANCES 0x2
+
enum mmsch_v4_0_command_type {
MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
@@ -60,7 +62,7 @@ struct mmsch_v4_0_table_info {
struct mmsch_v4_0_init_header {
uint32_t version;
uint32_t total_size;
- struct mmsch_v4_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct mmsch_v4_0_table_info inst[MMSCH_V4_0_VCN_INSTANCES];
struct mmsch_v4_0_table_info jpegdec;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h
index 298c136cefe0..db7eb5260295 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2021 Red Hat Inc.
+ * Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,23 +18,20 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "ram.h"
-
-#include <subdev/bios.h>
-#include <subdev/bios/init.h>
-#include <subdev/bios/rammap.h>
-static const struct nvkm_ram_func
-ga102_ram = {
-};
+#ifndef __MMSCH_V4_0_3_H__
+#define __MMSCH_V4_0_3_H__
-int
-ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
-{
- struct nvkm_device *device = fb->subdev.device;
- enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
- u32 size = nvkm_rd32(device, 0x1183a4);
+#include "amdgpu_vcn.h"
+#include "mmsch_v4_0.h"
- return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
-}
+struct mmsch_v4_0_3_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v4_0_table_info vcn0;
+ struct mmsch_v4_0_table_info mjpegdec0[4];
+ struct mmsch_v4_0_table_info mjpegdec1[4];
+};
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index cae1aaa4ddb6..6a68ee946f1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -183,12 +183,10 @@ send_request:
if (req != IDH_REQ_GPU_INIT_DATA) {
pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return r;
- }
- else /* host doesn't support REQ_GPU_INIT_DATA handshake */
+ } else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
} else {
- if (req == IDH_REQ_GPU_INIT_DATA)
- {
+ if (req == IDH_REQ_GPU_INIT_DATA) {
adev->virt.req_init_data_ver =
RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 288c414babdf..59f53c743362 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -334,7 +334,7 @@ static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
break;
}
mdelay(1);
- timeout -=1;
+ timeout -= 1;
reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index eec13cb5bf75..b6a8478dabf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -565,7 +565,7 @@ static int navi10_ih_sw_init(void *handle)
use_bus_addr = false;
else
use_bus_addr = true;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
@@ -578,7 +578,7 @@ static int navi10_ih_sw_init(void *handle)
/* initialize ih control registers offset */
navi10_ih_init_register_offset(adev);
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index aa761ff3a5fa..4038455d7998 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -345,8 +345,8 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
-#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms
-#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms
+#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x0000000A // 1=1us, 9=1ms, 10=4ms
+#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 400ms
static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
bool enable)
@@ -479,9 +479,12 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
- data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
- data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
- data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ if (pci_is_thunderbolt_attached(adev->pdev))
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ else
+ data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 24d12075ca3a..9ea072374cb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -30,6 +30,20 @@
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
+#define NPS_MODE_MASK 0x000000FFL
+
+/* Core 0 Port 0 counter */
+#define smnPCIEP_NAK_COUNTER 0x1A340218
+
+#define smnPCIE_PERF_CNTL_TXCLK3 0x1A38021c
+#define smnPCIE_PERF_CNTL_TXCLK7 0x1A380888
+#define smnPCIE_PERF_COUNT_CNTL 0x1A380200
+#define smnPCIE_PERF_COUNT0_TXCLK3 0x1A380220
+#define smnPCIE_PERF_COUNT0_TXCLK7 0x1A38088C
+#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK3 0x1A3808F8
+#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK7 0x1A380918
+
+
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -66,6 +80,13 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
bool use_doorbell, int doorbell_index, int doorbell_size)
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
+ int aid_id, dev_inst;
+
+ dev_inst = GET_INST(SDMA0, instance);
+ aid_id = adev->sdma.instance[instance].aid_id;
+
+ if (use_doorbell == false)
+ return;
doorbell_range =
REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
@@ -80,9 +101,10 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size);
- switch (instance) {
+ switch (dev_inst % adev->sdma.num_inst_per_aid) {
case 0:
- WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1, doorbell_range);
+ WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1,
+ 4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
@@ -94,10 +116,12 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x1);
- WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL, doorbell_ctrl);
+ WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_1_CTRL,
+ aid_id, doorbell_ctrl);
break;
case 1:
- WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2, doorbell_range);
+ WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2,
+ 4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
@@ -109,10 +133,12 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x2);
- WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_ctrl);
+ WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_2_CTRL,
+ aid_id, doorbell_ctrl);
break;
case 2:
- WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3, doorbell_range);
+ WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3,
+ 4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
@@ -124,10 +150,12 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x8);
- WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_ctrl);
+ WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_5_CTRL,
+ aid_id, doorbell_ctrl);
break;
case 3:
- WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4, doorbell_range);
+ WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4,
+ 4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
@@ -139,11 +167,12 @@ static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instan
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x9);
- WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_6_CTRL, doorbell_ctrl);
+ WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_6_CTRL,
+ aid_id, doorbell_ctrl);
break;
default:
break;
- };
+ }
return;
}
@@ -152,6 +181,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
int doorbell_index, int instance)
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
+ u32 aid_id = instance;
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
@@ -161,7 +191,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x8);
+ 0x9);
+ if (aid_id)
+ doorbell_range = REG_SET_FIELD(doorbell_range,
+ DOORBELL0_CTRL_ENTRY_0,
+ DOORBELL0_FENCE_ENABLE_ENTRY,
+ 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
@@ -174,10 +209,15 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
+
+ WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17,
+ aid_id, doorbell_range);
+ WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL,
+ aid_id, doorbell_ctrl);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
@@ -185,10 +225,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, 0);
- }
- WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17, doorbell_range);
- WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_ctrl);
+ WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17,
+ aid_id, doorbell_range);
+ WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL,
+ aid_id, doorbell_ctrl);
+ }
}
static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -235,7 +277,7 @@ static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev,
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x4);
+ 0x8);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
@@ -248,7 +290,7 @@ static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x4);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0);
@@ -319,6 +361,11 @@ static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
}
+static u32 nbio_v7_9_get_pcie_index_hi_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2_HI);
+}
+
const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
@@ -347,11 +394,126 @@ static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
+static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
+{
+ u32 tmp, px;
+
+ tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS);
+ px = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS,
+ PARTITION_MODE);
+
+ return px;
+}
+
+static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
+ u32 *supp_modes)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+ tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
+
+ if (supp_modes) {
+ *supp_modes =
+ RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_CAP);
+ }
+
+ return ffs(tmp);
+}
+
+static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
+{
+ u32 inst_mask;
+ int i;
+
+ WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
+ 0xff & ~(adev->gfx.xcc_mask));
+
+ WREG32_SOC15(NBIO, 0, regBIFC_GFX_INT_MONITOR_MASK, 0x7ff);
+
+ inst_mask = adev->aid_mask & ~1U;
+ for_each_inst(i, inst_mask) {
+ WREG32_SOC15_EXT(NBIO, i, regXCC_DOORBELL_FENCE, i,
+ XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
+
+ }
+}
+
+static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ u32 val, nak_r, nak_g;
+
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
+ /* Get the number of NAKs received and generated */
+ val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
+ nak_r = val & 0xFFFF;
+ nak_g = val >> 16;
+
+ /* Add the total number of NAKs, i.e the number of replays */
+ return (nak_r + nak_g);
+}
+
+static void nbio_v7_9_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctrrx = 0;
+ uint32_t perfctrtx = 0;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Use TXCLK3 counter group for rx event */
+ /* Use TXCLK7 counter group for tx event */
+ /* Set the 2 events that we wish to watch, defined above */
+ /* 40 is event# for received msgs */
+ /* 2 is event# of posted requests sent */
+ perfctrrx = REG_SET_FIELD(perfctrrx, PCIE_PERF_CNTL_TXCLK3, EVENT0_SEL, 40);
+ perfctrtx = REG_SET_FIELD(perfctrtx, PCIE_PERF_CNTL_TXCLK7, EVENT0_SEL, 2);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctrrx);
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK7, perfctrtx);
+
+ /* Zero out and enable SHADOW_WR
+ * Write 0x6:
+ * Bit 1 = Global Shadow wr(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
+
+	/* Enable Global Counter
+ * Write 0x1:
+ * Bit 0 = Global Counter Enable(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000001);
+
+ msleep(1000);
+
+ /* Disable Global Counter, Reset and enable SHADOW_WR
+ * Write 0x6:
+ * Bit 1 = Global Shadow wr(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
+
+ /* Get the upper and lower count */
+ *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) |
+ ((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK3) << 32);
+ *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK7) |
+ ((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK7) << 32);
+}
+
const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_9_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v7_9_get_pcie_data_offset,
+ .get_pcie_index_hi_offset = nbio_v7_9_get_pcie_index_hi_offset,
.get_rev_id = nbio_v7_9_get_rev_id,
.mc_access_enable = nbio_v7_9_mc_access_enable,
.get_memsize = nbio_v7_9_get_memsize,
@@ -366,4 +528,196 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_clockgating_state = nbio_v7_9_get_clockgating_state,
.ih_control = nbio_v7_9_ih_control,
.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
+ .get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
+ .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+ .init_registers = nbio_v7_9_init_registers,
+ .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
+ .get_pcie_usage = nbio_v7_9_get_pcie_usage,
+};
+
+static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ return;
+}
+
+static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ if (!ras->disable_ras_err_cnt_harvest) {
+ /*
+ * clear error status after ras_controller_intr
+ * according to hw team and count ue number
+ * for query
+ */
+ nbio_v7_9_query_ras_error_count(adev, &err_data);
+
+ /* logging on error cnt and printing for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ dev_info(adev->dev, "%ld correctable hardware "
+ "errors detected in %s block, "
+ "no user action is needed.\n",
+ obj->err_data.ce_count,
+ get_ras_block_str(adev->nbio.ras_if));
+
+ if (err_data.ue_count)
+ dev_info(adev->dev, "%ld uncorrectable hardware "
+ "errors detected in %s block\n",
+ obj->err_data.ue_count,
+ get_ras_block_str(adev->nbio.ras_if));
+ }
+
+ dev_info(adev->dev, "RAS controller interrupt triggered "
+ "by NBIF error\n");
+
+ /* ras_controller_int is dedicated for nbif ras error,
+ * not the global interrupt for sync flood
+ */
+ amdgpu_ras_reset_gpu(adev);
+ }
+}
+
+static void nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* Dummy function, there is no initialization operation in driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for ras_controller_irq should be written
+ * to BIFring instead of general iv ring. However, due to known bif ring
+ * hw bug, it has to be disabled. There is no chance the process function
+	 * will be invoked. Just leave it as a dummy one.
+ */
+ return 0;
+}
+
+static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* Dummy function, there is no initialization operation in driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to BIFring instead of general iv ring. However, due to known bif ring
+ * hw bug, it has to be disabled. There is no chance the process function
+	 * will be invoked. Just leave it as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
+ .set = nbio_v7_9_set_ras_controller_irq_state,
+ .process = nbio_v7_9_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_9_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_9_process_err_event_athub_irq,
+};
+
+static int nbio_v7_9_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_9_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+
+ return r;
+}
+
+static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_9_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+const struct amdgpu_ras_block_hw_ops nbio_v7_9_ras_hw_ops = {
+ .query_ras_error_count = nbio_v7_9_query_ras_error_count,
+};
+
+struct amdgpu_nbio_ras nbio_v7_9_ras = {
+ .ras_block = {
+ .ras_comm = {
+ .name = "pcie_bif",
+ .block = AMDGPU_RAS_BLOCK__PCIE_BIF,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ },
+ .hw_ops = &nbio_v7_9_ras_hw_ops,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
+ },
+ .handle_ras_controller_intr_no_bifring = nbio_v7_9_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_9_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_9_init_ras_err_event_athub_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
index 8e04eb484328..73709771950d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h
@@ -28,5 +28,6 @@
extern const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_9_funcs;
+extern struct amdgpu_nbio_ras nbio_v7_9_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 0fb6013441f0..13aca808ecab 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -67,21 +67,18 @@
static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
-static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
-{
+static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
-static const struct amdgpu_video_codecs nv_video_codecs_encode =
-{
+static const struct amdgpu_video_codecs nv_video_codecs_encode = {
.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
.codec_array = nv_video_codecs_encode_array,
};
/* Navi1x */
-static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
-{
+static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
@@ -91,8 +88,7 @@ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs nv_video_codecs_decode =
-{
+static const struct amdgpu_video_codecs nv_video_codecs_decode = {
.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
.codec_array = nv_video_codecs_decode_array,
};
@@ -108,8 +104,7 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
.codec_array = sc_video_codecs_encode_array,
};
-static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
-{
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
@@ -120,8 +115,7 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] =
-{
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
@@ -131,27 +125,23 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 =
-{
+static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 = {
.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0),
.codec_array = sc_video_codecs_decode_array_vcn0,
};
-static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
-{
+static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1),
.codec_array = sc_video_codecs_decode_array_vcn1,
};
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
-static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
-{
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
-static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
-{
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
@@ -162,8 +152,7 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] =
-{
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
@@ -173,20 +162,17 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[]
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
-static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
-{
+static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = {
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
.codec_array = sriov_sc_video_codecs_encode_array,
};
-static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 =
-{
+static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 = {
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0),
.codec_array = sriov_sc_video_codecs_decode_array_vcn0,
};
-static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 =
-{
+static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 = {
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1),
.codec_array = sriov_sc_video_codecs_decode_array_vcn1,
};
@@ -341,11 +327,6 @@ void nv_grbm_select(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
-static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
-{
- /* todo */
-}
-
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
/* todo */
@@ -381,12 +362,12 @@ static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -541,8 +522,7 @@ static void nv_program_aspm(struct amdgpu_device *adev)
}
-const struct amdgpu_ip_block_version nv_common_ip_block =
-{
+const struct amdgpu_ip_block_version nv_common_ip_block = {
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
.minor = 0,
@@ -577,16 +557,6 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
-static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
-{
-
- /* TODO
- * dummy implement for pcie_replay_count sysfs interface
- * */
-
- return 0;
-}
-
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
@@ -632,9 +602,9 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
bool enter)
{
if (enter)
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
else
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
@@ -647,14 +617,12 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
return 0;
}
-static const struct amdgpu_asic_funcs nv_asic_funcs =
-{
+static const struct amdgpu_asic_funcs nv_asic_funcs = {
.read_disabled_bios = &nv_read_disabled_bios,
.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &nv_read_register,
.reset = &nv_asic_reset,
.reset_method = &nv_asic_reset_method,
- .set_vga_state = &nv_vga_set_state,
.get_xclk = &nv_get_xclk,
.set_uvd_clocks = &nv_set_uvd_clocks,
.set_vce_clocks = &nv_set_vce_clocks,
@@ -662,7 +630,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
.need_reset_on_init = &nv_need_reset_on_init,
- .get_pcie_replay_count = &nv_get_pcie_replay_count,
+ .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &nv_pre_asic_init,
.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
@@ -895,7 +863,8 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG |
- AMD_CG_SUPPORT_JPEG_MGCG;
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
@@ -956,7 +925,8 @@ static int nv_common_early_init(void *handle)
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG |
- AMD_CG_SUPPORT_JPEG_MGCG;
+ AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG |
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index fd6b58243b03..631dafb92299 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -462,6 +462,9 @@
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
#define PACKET3_RUN_LIST 0xA5
#define PACKET3_MAP_PROCESS_VM 0xA6
-
+/* GFX11 */
+#define PACKET3_SET_Q_PREEMPTION_MODE 0xF0
+# define PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(x) ((x) << 0)
+# define PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM (1 << 0)
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 22c775f39119..18917df785ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -102,6 +102,7 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */
GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */
GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */
+ GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */
};
/* PSP boot config sub-commands */
@@ -338,6 +339,13 @@ struct psp_gfx_cmd_boot_cfg
uint32_t boot_config_valid; /* dynamic boot configuration valid bits bitmask */
};
+struct psp_gfx_cmd_sriov_spatial_part {
+ uint32_t mode;
+ uint32_t override_ips;
+ uint32_t override_xcds_avail;
+ uint32_t override_this_aid;
+};
+
/* All GFX ring buffer commands. */
union psp_gfx_commands
{
@@ -351,6 +359,7 @@ union psp_gfx_commands
struct psp_gfx_cmd_setup_tmr cmd_setup_vmr;
struct psp_gfx_cmd_load_toc cmd_load_toc;
struct psp_gfx_cmd_boot_cfg boot_cfg;
+ struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part;
};
struct psp_gfx_uresp_reserved
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index e1b7fca09666..5f10883da6a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
if (err)
return err;
- return psp_init_ta_microcode(psp, ucode_prefix);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) &&
+ (adev->pdev->revision == 0xa1) &&
+ (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) {
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
+ }
+ return err;
}
static int psp_v10_0_ring_create(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index caee76ab7110..10b17bd5aebe 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -49,6 +49,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -93,6 +96,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
@@ -136,14 +140,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
int ret;
int retry_loop;
+ /* Wait for bootloader to signify that it is ready having bit 31 of
+ * C2PMSG_35 set to 1. All other bits are expected to be cleared.
+ * If there is an error in processing command, bits[7:0] will be set.
+ * This is applicable for PSP v13.0.6 and newer.
+ */
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
- /* Wait for bootloader to signify that is
- ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0xffffffff, false);
if (ret == 0)
return 0;
@@ -624,10 +629,11 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
- return 0;
-
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -685,6 +691,27 @@ static int psp_v13_0_vbflash_status(struct psp_context *psp)
return RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_115);
}
+static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 10)) {
+ uint32_t reg_data;
+ /* MP1 fatal error: trigger PSP dram read to unhalt PSP
+ * during MP1 triggered sync flood.
+ */
+ reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, reg_data + 0x10);
+
+ /* delay 1000ms for the mode1 reset for fatal error
+ * to be recovered back.
+ */
+ msleep(1000);
+ }
+
+ return 0;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
@@ -704,7 +731,8 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
- .vbflash_stat = psp_v13_0_vbflash_status
+ .vbflash_stat = psp_v13_0_vbflash_status,
+ .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
index b2414a729ca1..de5677ce4330 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
@@ -25,6 +25,8 @@
#include "amdgpu_psp.h"
+#define PSP_SPIROM_UPDATE_TIMEOUT 60000 /* 60s */
+
void psp_v13_0_set_psp_funcs(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index fd2a7b66ac56..51afc92994a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -466,8 +466,6 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
- ring->sched.ready = true;
}
sdma_v2_4_enable(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index e572389089d2..344202870aeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -734,8 +734,6 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
- ring->sched.ready = true;
}
/* unhalt the MEs */
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9295ac7edd56..cd37f45e01a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1114,8 +1114,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
#endif
/* enable DMA IBs */
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
- ring->sched.ready = true;
}
/**
@@ -1202,8 +1200,6 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
#endif
/* enable DMA IBs */
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
- ring->sched.ready = true;
}
static void
@@ -1825,12 +1821,12 @@ static int sdma_v4_0_sw_init(void *handle)
/*
* On Arcturus, SDMA instance 5~7 has a different vmhub
- * type(AMDGPU_MMHUB_1).
+ * type(AMDGPU_MMHUB1).
*/
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
- ring->vm_hub = AMDGPU_MMHUB_1;
+ ring->vm_hub = AMDGPU_MMHUB1(0);
else
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
@@ -1847,13 +1843,23 @@ static int sdma_v4_0_sw_init(void *handle)
/* paging queue use same doorbell index/routing as gfx queue
* with 0x400 (4096 dwords) offset on second doorbell page
*/
- ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
- ring->doorbell_index += 0x400;
+ if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
+ adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {
+ ring->doorbell_index =
+ adev->doorbell_index.sdma_engine[i] << 1;
+ ring->doorbell_index += 0x400;
+ } else {
+ /* From vega20, the sdma_doorbell_range in 1st
+ * doorbell page is reserved for page queue.
+ */
+ ring->doorbell_index =
+ (adev->doorbell_index.sdma_engine[i] + 1) << 1;
+ }
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
- ring->vm_hub = AMDGPU_MMHUB_1;
+ ring->vm_hub = AMDGPU_MMHUB1(0);
else
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -2306,7 +2312,7 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
@@ -2338,7 +2344,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index 6f9895cdddb1..0ddb6955a6d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -141,6 +141,10 @@ static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED),
0, 0,
},
+ { "SDMA_UTCL1_WR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_WR_FIFO_SED),
+ 0, 0,
+ },
{ "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED),
0, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 64dcaa2670dd..f413898dda37 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include "amdgpu.h"
+#include "amdgpu_xcp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -53,11 +54,14 @@ static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
+static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
{
- return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset);
+ u32 dev_inst = GET_INST(SDMA0, instance);
+
+ return (adev->reg_offset[SDMA0_HWIP][dev_inst][0] + offset);
}
static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
@@ -92,13 +96,25 @@ static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
}
}
-static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev)
+static void sdma_v4_4_2_inst_init_golden_registers(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(4, 4, 2):
- break;
- default:
- break;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG);
+ val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG, NUM_BANKS, 4);
+ val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG,
+ PIPE_INTERLEAVE_SIZE, 0);
+ WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG, val);
+
+ val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ);
+ val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ, NUM_BANKS,
+ 4);
+ val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ,
+ PIPE_INTERLEAVE_SIZE, 0);
+ WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ, val);
}
}
@@ -399,19 +415,21 @@ static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
/**
- * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines
+ * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
+ * @inst_mask: mask of dma engine instances to be disabled
*
* Stop the gfx async dma ring buffers.
*/
-static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i, unset = 0;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].ring;
if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
@@ -429,32 +447,36 @@ static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v4_4_2_rlc_stop - stop the compute async dma engines
+ * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
+ * @inst_mask: mask of dma engine instances to be disabled
*
* Stop the compute async dma queues.
*/
-static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev)
+static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
/* XXX todo */
}
/**
- * sdma_v4_4_2_page_stop - stop the page async dma engines
+ * sdma_v4_4_2_inst_page_stop - stop the page async dma engines
*
* @adev: amdgpu_device pointer
+ * @inst_mask: mask of dma engine instances to be disabled
*
* Stop the page async dma ring buffers.
*/
-static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev)
+static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
bool unset = false;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].page;
if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
@@ -475,14 +497,16 @@ static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev)
}
/**
- * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v4_4_2_inst_ctx_switch_enable - enable/disable the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
+ * @inst_mask: mask of dma engine instances to be enabled
*
* Halt or unhalt the async dma engines context switch.
*/
-static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev,
+ bool enable, uint32_t inst_mask)
{
u32 f32_cntl, phase_quantum = 0;
int i;
@@ -511,7 +535,7 @@ static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enabl
unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
@@ -525,30 +549,39 @@ static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enabl
/* Extend page fault timeout to avoid interrupt storm */
WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
}
-
}
/**
- * sdma_v4_4_2_enable - stop the async dma engines
+ * sdma_v4_4_2_inst_enable - enable/disable the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
+ * @inst_mask: mask of dma engine instances to be enabled
*
* Halt or unhalt the async dma engines.
*/
-static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable)
+static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable,
+ uint32_t inst_mask)
{
u32 f32_cntl;
int i;
if (!enable) {
- sdma_v4_4_2_gfx_stop(adev);
- sdma_v4_4_2_rlc_stop(adev);
+ sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
+ sdma_v4_4_2_inst_rlc_stop(adev, inst_mask);
if (adev->sdma.has_page_queue)
- sdma_v4_4_2_page_stop(adev);
+ sdma_v4_4_2_inst_page_stop(adev, inst_mask);
+
+ /* SDMA FW needs to respond to FREEZE requests during reset.
+ * Keep it running during reset */
+ if (!amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ return;
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+ return;
+
+ for_each_inst(i, inst_mask) {
f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
@@ -659,8 +692,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
#endif
/* enable DMA IBs */
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
-
- ring->sched.ready = true;
}
/**
@@ -750,8 +781,6 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
#endif
/* enable DMA IBs */
WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
-
- ring->sched.ready = true;
}
static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
@@ -760,14 +789,16 @@ static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
}
/**
- * sdma_v4_4_2_rlc_resume - setup and start the async dma engines
+ * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
+ * @inst_mask: mask of dma engine instances to be enabled
*
* Set up the compute DMA queues and enable them.
* Returns 0 for success, error for failure.
*/
-static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev)
+static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
sdma_v4_4_2_init_pg(adev);
@@ -775,14 +806,16 @@ static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev)
}
/**
- * sdma_v4_4_2_load_microcode - load the sDMA ME ucode
+ * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode
*
* @adev: amdgpu_device pointer
+ * @inst_mask: mask of dma engine instances to be enabled
*
* Loads the sDMA0/1 ucode.
* Returns 0 for success, -EINVAL if the ucode is not available.
*/
-static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
+static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
@@ -790,9 +823,9 @@ static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
int i, j;
/* halt the MEs */
- sdma_v4_4_2_enable(adev, false);
+ sdma_v4_4_2_inst_enable(adev, false, inst_mask);
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
if (!adev->sdma.instance[i].fw)
return -EINVAL;
@@ -818,38 +851,42 @@ static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
}
/**
- * sdma_v4_4_2_start - setup and start the async dma engines
+ * sdma_v4_4_2_inst_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
+ * @inst_mask: mask of dma engine instances to be enabled
*
* Set up the DMA engines and enable them.
* Returns 0 for success, error for failure.
*/
-static int sdma_v4_4_2_start(struct amdgpu_device *adev)
+static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
+ uint32_t inst_mask)
{
struct amdgpu_ring *ring;
+ uint32_t tmp_mask;
int i, r = 0;
if (amdgpu_sriov_vf(adev)) {
- sdma_v4_4_2_ctx_switch_enable(adev, false);
- sdma_v4_4_2_enable(adev, false);
+ sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
+ sdma_v4_4_2_inst_enable(adev, false, inst_mask);
} else {
/* bypass sdma microcode loading on Gopher */
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
- !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) {
- r = sdma_v4_4_2_load_microcode(adev);
+ adev->sdma.instance[0].fw) {
+ r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
if (r)
return r;
}
/* unhalt the MEs */
- sdma_v4_4_2_enable(adev, true);
+ sdma_v4_4_2_inst_enable(adev, true, inst_mask);
/* enable sdma ring preemption */
- sdma_v4_4_2_ctx_switch_enable(adev, true);
+ sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
}
/* start the gfx rings and rlc compute queues */
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ tmp_mask = inst_mask;
+ for_each_inst(i, tmp_mask) {
uint32_t temp;
WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
@@ -860,31 +897,31 @@ static int sdma_v4_4_2_start(struct amdgpu_device *adev)
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ /* enable context empty interrupt during initialization */
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
WREG32_SDMA(i, regSDMA_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
-
- /* unhalt engine */
- temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
- temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
- WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ }
}
}
if (amdgpu_sriov_vf(adev)) {
- sdma_v4_4_2_ctx_switch_enable(adev, true);
- sdma_v4_4_2_enable(adev, true);
+ sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
+ sdma_v4_4_2_inst_enable(adev, true, inst_mask);
} else {
- r = sdma_v4_4_2_rlc_resume(adev);
+ r = sdma_v4_4_2_inst_rlc_resume(adev, inst_mask);
if (r)
return r;
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ tmp_mask = inst_mask;
+ for_each_inst(i, tmp_mask) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_helper(ring);
@@ -1221,6 +1258,7 @@ static int sdma_v4_4_2_early_init(void *handle)
sdma_v4_4_2_set_buffer_funcs(adev);
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
+ sdma_v4_4_2_set_ras_funcs(adev);
return 0;
}
@@ -1253,9 +1291,10 @@ static int sdma_v4_4_2_sw_init(void *handle)
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 aid_id;
/* SDMA trap event */
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
@@ -1264,7 +1303,7 @@ static int sdma_v4_4_2_sw_init(void *handle)
}
/* SDMA SRAM ECC event */
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
&adev->sdma.ecc_irq);
@@ -1273,7 +1312,7 @@ static int sdma_v4_4_2_sw_init(void *handle)
}
/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_VM_HOLE,
&adev->sdma.vm_hole_irq);
@@ -1303,15 +1342,17 @@ static int sdma_v4_4_2_sw_init(void *handle)
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
+ aid_id = adev->sdma.instance[i].aid_id;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(aid_id);
- sprintf(ring->name, "sdma%d", i);
+ sprintf(ring->name, "sdma%d.%d", aid_id,
+ i % adev->sdma.num_inst_per_aid);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -1323,14 +1364,15 @@ static int sdma_v4_4_2_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- /* paging queue use same doorbell index/routing as gfx queue
- * with 0x400 (4096 dwords) offset on second doorbell page
+ /* doorbell index of page queue is assigned right after
+ * gfx queue on the same instance
*/
- ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
- ring->doorbell_index += 0x400;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->doorbell_index =
+ (adev->doorbell_index.sdma_engine[i] + 1) << 1;
+ ring->vm_hub = AMDGPU_MMHUB0(aid_id);
- sprintf(ring->name, "page%d", i);
+ sprintf(ring->name, "page%d.%d", aid_id,
+ i % adev->sdma.num_inst_per_aid);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1340,6 +1382,11 @@ static int sdma_v4_4_2_sw_init(void *handle)
}
}
+ if (amdgpu_sdma_ras_sw_init(adev)) {
+ dev_err(adev->dev, "fail to initialize sdma ras block\n");
+ return -EINVAL;
+ }
+
return r;
}
@@ -1366,14 +1413,13 @@ static int sdma_v4_4_2_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t inst_mask;
- if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
-
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!amdgpu_sriov_vf(adev))
- sdma_v4_4_2_init_golden_registers(adev);
+ sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_start(adev);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask);
return r;
}
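
hw_init now covers every engine by building a contiguous instance mask with GENMASK() and handing it to the per-instance helpers, which walk it with for_each_inst(). A standalone model of that mask-and-iterate pattern (EX_GENMASK and the bit walk below are local stand-ins for the kernel's GENMASK() and for_each_inst()):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for GENMASK(h, l): bits l..h set. */
#define EX_GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int num_instances = 4;
	uint32_t inst_mask = EX_GENMASK(num_instances - 1, 0); /* 0b1111 */
	uint32_t tmp = inst_mask;
	int i;

	/* Stand-in for for_each_inst(): visit each set bit of the mask. */
	while (tmp) {
		i = __builtin_ctz(tmp);
		tmp &= tmp - 1;
		printf("start SDMA instance %d\n", i);
	}
	return 0;
}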
@@ -1381,26 +1427,36 @@ static int sdma_v4_4_2_hw_init(void *handle)
static int sdma_v4_4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t inst_mask;
int i;
if (amdgpu_sriov_vf(adev))
return 0;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
- sdma_v4_4_2_ctx_switch_enable(adev, false);
- sdma_v4_4_2_enable(adev, false);
+ sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
+ sdma_v4_4_2_inst_enable(adev, false, inst_mask);
return 0;
}
+static int sdma_v4_4_2_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state);
+
static int sdma_v4_4_2_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev))
+ sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+
return sdma_v4_4_2_hw_fini(adev);
}
@@ -1471,13 +1527,31 @@ static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t instance;
+ uint32_t instance, i;
DRM_DEBUG("IH: SDMA trap\n");
instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+
+ /* The client id only identifies the SDMA instance within an AID. The
+ * interrupt entry also carries a node id, which maps to a physical AID;
+ * match that AID against the AID of each candidate instance to find the
+ * absolute SDMA instance. */
+ for (i = instance; i < adev->sdma.num_instances;
+ i += adev->sdma.num_inst_per_aid) {
+ if (adev->sdma.instance[i].aid_id ==
+ node_id_to_phys_map[entry->node_id])
+ break;
+ }
+
+ if (i >= adev->sdma.num_instances) {
+ dev_WARN_ONCE(
+ adev->dev, 1,
+ "Couldn't find the right sdma instance in trap handler");
+ return 0;
+ }
+
switch (entry->ring_id) {
case 0:
- amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ amdgpu_fence_process(&adev->sdma.instance[i].ring);
break;
default:
break;
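
The search in the trap handler pairs the per-AID instance (derived from the client id) with the physical AID (derived from the entry's node id). A hedged, standalone sketch of that lookup, with a hypothetical instance count and AID table:

#define EX_NUM_INSTANCES 8  /* hypothetical total SDMA instances */
#define EX_INST_PER_AID  2  /* hypothetical SDMA instances per AID */

/* ex_aid_of[i] mirrors adev->sdma.instance[i].aid_id. */
static const int ex_aid_of[EX_NUM_INSTANCES] = { 0, 0, 1, 1, 2, 2, 3, 3 };

/* Given the per-AID instance (from the client id) and the physical AID
 * (from the IV entry's node id), find the absolute SDMA instance.
 * Returns -1 when no match exists, the case the driver warns about. */
static int ex_find_sdma_instance(int inst_in_aid, int phys_aid)
{
	int i;

	for (i = inst_in_aid; i < EX_NUM_INSTANCES; i += EX_INST_PER_AID) {
		if (ex_aid_of[i] == phys_aid)
			return i;
	}
	return -1;
}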
@@ -1496,7 +1570,7 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
* be disabled and the driver should only look for the aggregated
* interrupt via sync flood
*/
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
goto out;
instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
@@ -1535,15 +1609,22 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 sdma_edc_config;
+ u32 sdma_cntl;
- sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG);
- /*
- * FIXME: This was inherited from Aldebaran, but no this field
- * definition in the regspec of both Aldebaran and SDMA 4.4.2
- */
- sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? (1 << 2) : 0;
- WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config);
+ sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
+ DRAM_ECC_INT_ENABLE, 0);
+ WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+ break;
+ /* the sdma ecc interrupt is enabled by default;
+ * the driver doesn't need to do anything
+ * to enable it */
+ case AMDGPU_IRQ_STATE_ENABLE:
+ default:
+ break;
+ }
return 0;
}
@@ -1615,19 +1696,49 @@ static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
return 0;
}
-static void sdma_v4_4_2_update_medium_grain_clock_gating(
- struct amdgpu_device *adev,
- bool enable)
+static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
+ struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{
uint32_t data, def;
int i;
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ /* leave as default if it is not driver controlled */
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS))
+ return;
+
+ if (enable) {
+ for_each_inst(i, inst_mask) {
+ /* 1-not override: enable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+ data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ } else {
+ for_each_inst(i, inst_mask) {
+ /* 0-override:disable sdma mem light sleep */
+ def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+ data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+ }
+ }
+}
+
+static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
+ struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
+{
+ uint32_t data, def;
+ int i;
+
+ /* leave as default if it is not driver controlled */
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG))
+ return;
+
+ if (enable) {
+ for_each_inst(i, inst_mask) {
def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
- data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
@@ -1637,11 +1748,9 @@ static void sdma_v4_4_2_update_medium_grain_clock_gating(
WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
}
} else {
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
- data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
@@ -1653,45 +1762,21 @@ static void sdma_v4_4_2_update_medium_grain_clock_gating(
}
}
-
-static void sdma_v4_4_2_update_medium_grain_light_sleep(
- struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t data, def;
- int i;
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- /* 1-not override: enable sdma mem light sleep */
- def = data = RREG32_SDMA(0, regSDMA_POWER_CNTL);
- data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (def != data)
- WREG32_SDMA(0, regSDMA_POWER_CNTL, data);
- }
- } else {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- /* 0-override:disable sdma mem light sleep */
- def = data = RREG32_SDMA(0, regSDMA_POWER_CNTL);
- data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (def != data)
- WREG32_SDMA(0, regSDMA_POWER_CNTL, data);
- }
- }
-}
-
static int sdma_v4_4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t inst_mask;
if (amdgpu_sriov_vf(adev))
return 0;
- sdma_v4_4_2_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE);
- sdma_v4_4_2_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE);
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
+
+ sdma_v4_4_2_inst_update_medium_grain_clock_gating(
+ adev, state == AMD_CG_STATE_GATE, inst_mask);
+ sdma_v4_4_2_inst_update_medium_grain_light_sleep(
+ adev, state == AMD_CG_STATE_GATE, inst_mask);
return 0;
}
@@ -1710,12 +1795,12 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
*flags = 0;
/* AMD_CG_SUPPORT_SDMA_MGCG */
- data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL));
- if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK))
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_CLK_CTRL));
+ if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK))
*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
/* AMD_CG_SUPPORT_SDMA_LS */
- data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL));
+ data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_POWER_CNTL));
if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
@@ -1740,7 +1825,7 @@ const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.get_rptr = sdma_v4_4_2_ring_get_rptr,
@@ -1771,7 +1856,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
- .align_mask = 0xf,
+ .align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.get_rptr = sdma_v4_4_2_ring_get_rptr,
@@ -1802,7 +1887,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
{
- int i;
+ int i, dev_inst;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
@@ -1812,6 +1897,11 @@ static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
&sdma_v4_4_2_page_ring_funcs;
adev->sdma.instance[i].page.me = i;
}
+
+ dev_inst = GET_INST(SDMA0, i);
+ /* AID to which SDMA belongs depends on physical instance */
+ adev->sdma.instance[i].aid_id =
+ dev_inst / adev->sdma.num_inst_per_aid;
}
}
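
The AID assignment above is just an integer division of the physical (device) instance by the number of SDMA engines per AID. A one-line illustration (the per-AID count is an assumption):

/* Physical SDMA instance -> AID index, assuming a fixed number of SDMA
 * engines per AID (2 here, purely illustrative). */
static inline int ex_sdma_aid_id(int dev_inst, int inst_per_aid)
{
	return dev_inst / inst_per_aid;  /* e.g. dev_inst 5 with 2 per AID -> AID 2 */
}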
@@ -1965,3 +2055,146 @@ const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
.rev = 0,
.funcs = &sdma_v4_4_2_ip_funcs,
};
+
+static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ if (!amdgpu_sriov_vf(adev))
+ sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
+
+ r = sdma_v4_4_2_inst_start(adev, inst_mask);
+
+ return r;
+}
+
+static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ uint32_t tmp_mask = inst_mask;
+ int i;
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for_each_inst(i, tmp_mask) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
+ }
+
+ sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
+ sdma_v4_4_2_inst_enable(adev, false, inst_mask);
+
+ return 0;
+}
+
+struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs = {
+ .suspend = &sdma_v4_4_2_xcp_suspend,
+ .resume = &sdma_v4_4_2_xcp_resume
+};
+
+static const struct amdgpu_ras_err_status_reg_entry sdma_v4_2_2_ue_reg_list[] = {
+ {AMDGPU_RAS_REG_ENTRY(SDMA0, 0, regSDMA_UE_ERR_STATUS_LO, regSDMA_UE_ERR_STATUS_HI),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"},
+};
+
+static const struct amdgpu_ras_memory_id_entry sdma_v4_4_2_ras_memory_list[] = {
+ {AMDGPU_SDMA_MBANK_DATA_BUF0, "SDMA_MBANK_DATA_BUF0"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF1, "SDMA_MBANK_DATA_BUF1"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF2, "SDMA_MBANK_DATA_BUF2"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF3, "SDMA_MBANK_DATA_BUF3"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF4, "SDMA_MBANK_DATA_BUF4"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF5, "SDMA_MBANK_DATA_BUF5"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF6, "SDMA_MBANK_DATA_BUF6"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF7, "SDMA_MBANK_DATA_BUF7"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF8, "SDMA_MBANK_DATA_BUF8"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF9, "SDMA_MBANK_DATA_BUF9"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF10, "SDMA_MBANK_DATA_BUF10"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF11, "SDMA_MBANK_DATA_BUF11"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF12, "SDMA_MBANK_DATA_BUF12"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF13, "SDMA_MBANK_DATA_BUF13"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF14, "SDMA_MBANK_DATA_BUF14"},
+ {AMDGPU_SDMA_MBANK_DATA_BUF15, "SDMA_MBANK_DATA_BUF15"},
+ {AMDGPU_SDMA_UCODE_BUF, "SDMA_UCODE_BUF"},
+ {AMDGPU_SDMA_RB_CMD_BUF, "SDMA_RB_CMD_BUF"},
+ {AMDGPU_SDMA_IB_CMD_BUF, "SDMA_IB_CMD_BUF"},
+ {AMDGPU_SDMA_UTCL1_RD_FIFO, "SDMA_UTCL1_RD_FIFO"},
+ {AMDGPU_SDMA_UTCL1_RDBST_FIFO, "SDMA_UTCL1_RDBST_FIFO"},
+ {AMDGPU_SDMA_UTCL1_WR_FIFO, "SDMA_UTCL1_WR_FIFO"},
+ {AMDGPU_SDMA_DATA_LUT_FIFO, "SDMA_DATA_LUT_FIFO"},
+ {AMDGPU_SDMA_SPLIT_DAT_BUF, "SDMA_SPLIT_DAT_BUF"},
+};
+
+static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t sdma_inst,
+ void *ras_err_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
+ uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
+
+ /* sdma v4_4_2 doesn't support querying CE counts */
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ sdma_v4_2_2_ue_reg_list,
+ ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
+ sdma_v4_4_2_ras_memory_list,
+ ARRAY_SIZE(sdma_v4_4_2_ras_memory_list),
+ sdma_dev_inst,
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ &err_data->ue_count);
+}
+
+static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_err_status)
+{
+ uint32_t inst_mask;
+ int i = 0;
+
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for_each_inst(i, inst_mask)
+ sdma_v4_4_2_inst_query_ras_error_count(adev, i, ras_err_status);
+ } else {
+ dev_warn(adev->dev, "SDMA RAS is not supported\n");
+ }
+}
+
+static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ uint32_t sdma_inst)
+{
+ uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
+
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ sdma_v4_2_2_ue_reg_list,
+ ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
+ sdma_dev_inst);
+}
+
+static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t inst_mask;
+ int i = 0;
+
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for_each_inst(i, inst_mask)
+ sdma_v4_4_2_inst_reset_ras_error_count(adev, i);
+ } else {
+ dev_warn(adev->dev, "SDMA RAS is not supported\n");
+ }
+}
+
+static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
+ .query_ras_error_count = sdma_v4_4_2_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
+};
+
+static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
+ .ras_block = {
+ .hw_ops = &sdma_v4_4_2_ras_hw_ops,
+ },
+};
+
+static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma.ras = &sdma_v4_4_2_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
index 4814e8a074d6..d516145529bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h
@@ -27,4 +27,6 @@
extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block;
+extern struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 92e1299be021..1cc34efb455b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -237,17 +237,15 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
// emulation only, won't work on real chip
// navi10 real chip needs to use PSP to load firmware
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
-{ int ret, i;
-
- if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
- return 0;
+{
+ int ret, i;
for (i = 0; i < adev->sdma.num_instances; i++) {
ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
-
+
return ret;
}
@@ -819,8 +817,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
- ring->sched.ready = true;
-
if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
sdma_v5_0_ctx_switch_enable(adev, true);
sdma_v5_0_enable(adev, true);
@@ -1389,7 +1385,7 @@ static int sdma_v5_0_sw_init(void *handle)
(adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index ca7e8757d78e..2b3ebebc4299 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -617,18 +617,14 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
- ring->sched.ready = true;
-
if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
sdma_v5_2_ctx_switch_enable(adev, true);
sdma_v5_2_enable(adev, true);
}
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -1253,7 +1249,7 @@ static int sdma_v5_2_sw_init(void *handle)
ring->doorbell_index =
(adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
@@ -1511,6 +1507,30 @@ static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,
+ int i)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 2, 1):
+ if (adev->sdma.instance[i].fw_version < 70)
+ return false;
+ break;
+ case IP_VERSION(5, 2, 3):
+ if (adev->sdma.instance[i].fw_version < 47)
+ return false;
+ break;
+ case IP_VERSION(5, 2, 7):
+ if (adev->sdma.instance[i].fw_version < 9)
+ return false;
+ break;
+ default:
+ return true;
+ }
+
+ return true;
+
+}
+
static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -1519,7 +1539,7 @@ static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *ade
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))
+ if (!sdma_v5_2_firmware_mgcg_support(adev, i))
adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1593,6 +1613,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
case IP_VERSION(5, 2, 5):
case IP_VERSION(5, 2, 6):
case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 7):
sdma_v5_2_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v5_2_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 3d9a80511a45..45be0af2570b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -238,6 +239,8 @@ static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*
* @ring: amdgpu ring pointer
* @ib: IB object to schedule
+ * @flags: unused
+ * @job: job to retrieve vmid from
*
* Schedule an IB in the DMA ring.
*/
@@ -585,16 +588,12 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
- ring->sched.ready = true;
-
if (amdgpu_sriov_vf(adev))
sdma_v6_0_enable(adev, true);
r = amdgpu_ring_test_helper(ring);
- if (r) {
- ring->sched.ready = false;
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -942,6 +941,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* sdma_v6_0_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring.
* Returns 0 on success, error on failure.
@@ -1122,6 +1122,7 @@ static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib,
/**
* sdma_v6_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
+ * @ring: amdgpu ring pointer
*
* Pad the IB with NOPs to a boundary multiple of 8.
*/
@@ -1171,6 +1172,8 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA.
@@ -1298,7 +1301,7 @@ static int sdma_v6_0_sw_init(void *handle)
ring->doorbell_index =
(adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
- ring->vm_hub = AMDGPU_GFXHUB_0;
+ ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 7f99e130acd0..f64b87b11b1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1181,12 +1181,12 @@ static uint32_t si_get_register_value(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index abca8b529721..42c4547f32ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -174,8 +174,6 @@ static int si_dma_start(struct amdgpu_device *adev)
WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
- ring->sched.ready = true;
-
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c
new file mode 100644
index 000000000000..4368a5891eeb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v13_0_3.h"
+#include "soc15_common.h"
+#include "smuio/smuio_13_0_3_offset.h"
+#include "smuio/smuio_13_0_3_sh_mask.h"
+
+#define PKG_TYPE_MASK 0x00000003L
+
+/**
+ * smuio_v13_0_3_get_die_id - query die id from FCH.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns die id
+ */
+static u32 smuio_v13_0_3_get_die_id(struct amdgpu_device *adev)
+{
+ u32 data, die_id;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID);
+
+ return die_id;
+}
+
+/**
+ * smuio_v13_0_3_get_socket_id - query socket id from FCH
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns socket id
+ */
+static u32 smuio_v13_0_3_get_socket_id(struct amdgpu_device *adev)
+{
+ u32 data, socket_id;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID);
+
+ return socket_id;
+}
+
+/**
+ * smuio_v13_0_3_get_pkg_type - query package type set by MP1/bootcode
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns package type
+ */
+
+static enum amdgpu_pkg_type smuio_v13_0_3_get_pkg_type(struct amdgpu_device *adev)
+{
+ enum amdgpu_pkg_type pkg_type;
+ u32 data;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, PKG_TYPE);
+ /* pkg_type[4:0]
+ *
+ * bit 1 == 1 APU form factor
+ *
+ * b0100 - b1111 - Reserved
+ */
+ switch (data & PKG_TYPE_MASK) {
+ case 0x2:
+ pkg_type = AMDGPU_PKG_TYPE_APU;
+ break;
+ default:
+ pkg_type = AMDGPU_PKG_TYPE_UNKNOWN;
+ break;
+ }
+
+ return pkg_type;
+}
+
+
+const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs = {
+ .get_die_id = smuio_v13_0_3_get_die_id,
+ .get_socket_id = smuio_v13_0_3_get_socket_id,
+ .get_pkg_type = smuio_v13_0_3_get_pkg_type,
+};
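
The version-specific SMUIO callbacks are consumed through a function-pointer table (adev->smuio.funcs in the driver) rather than being called directly. A standalone model of that dispatch, with placeholder implementations instead of the real register reads:

#include <stdio.h>

/* Standalone model of the funcs-table indirection: a struct of function
 * pointers chosen per SMUIO version, as amdgpu does with adev->smuio.funcs. */
struct ex_smuio_funcs {
	unsigned int (*get_die_id)(void);
	unsigned int (*get_socket_id)(void);
};

static unsigned int ex_v13_0_3_die_id(void)    { return 0; } /* placeholder values */
static unsigned int ex_v13_0_3_socket_id(void) { return 1; }

static const struct ex_smuio_funcs ex_smuio_v13_0_3_funcs = {
	.get_die_id = ex_v13_0_3_die_id,
	.get_socket_id = ex_v13_0_3_socket_id,
};

int main(void)
{
	const struct ex_smuio_funcs *funcs = &ex_smuio_v13_0_3_funcs;

	printf("socket %u, die %u\n", funcs->get_socket_id(), funcs->get_die_id());
	return 0;
}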
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h
new file mode 100644
index 000000000000..795f66c5a58b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V13_0_3_H__
+#define __SMUIO_V13_0_3_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs;
+
+#endif /* __SMUIO_V13_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 6d15d5cd9e07..c45721ca916e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -153,6 +153,24 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode =
.codec_array = rn_video_codecs_decode_array,
};
+static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = {
+ .codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array),
+ .codec_array = vcn_4_0_3_video_codecs_decode_array,
+};
+
+static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -185,6 +203,12 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
else
*codecs = &rn_video_codecs_decode;
return 0;
+ case IP_VERSION(4, 0, 3):
+ if (encode)
+ *codecs = &vcn_4_0_3_video_codecs_encode;
+ else
+ *codecs = &vcn_4_0_3_video_codecs_decode;
+ return 0;
default:
return -EINVAL;
}
@@ -301,17 +325,18 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
- adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
return 10000;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+ return reference_clock / 4;
return reference_clock;
}
void soc15_grbm_select(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 queue, u32 vmid)
+ u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
{
u32 grbm_gfx_cntl = 0;
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
@@ -319,12 +344,7 @@ void soc15_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
-}
-
-static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
-{
- /* todo */
+ WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
@@ -363,12 +383,12 @@ static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_n
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -532,6 +552,15 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
if (connected_to_cpu)
return AMD_RESET_METHOD_MODE2;
break;
+ case IP_VERSION(13, 0, 6):
+ /* Use gpu_recovery param to target a reset method.
+ * Enable triggering of GPU reset only if specified
+ * by module parameter.
+ */
+ if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
+ return AMD_RESET_METHOD_MODE2;
+ else
+ return AMD_RESET_METHOD_NONE;
default:
break;
}
@@ -816,7 +845,6 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
- .set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
@@ -838,7 +866,6 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
- .set_vga_state = &soc15_vga_set_state,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
@@ -853,6 +880,28 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.query_video_codecs = &soc15_query_video_codecs,
};
+static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
+{
+ .read_disabled_bios = &soc15_read_disabled_bios,
+ .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
+ .read_register = &soc15_read_register,
+ .reset = &soc15_asic_reset,
+ .reset_method = &soc15_asic_reset_method,
+ .get_xclk = &soc15_get_xclk,
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
+ .set_vce_clocks = &soc15_set_vce_clocks,
+ .get_config_memsize = &soc15_get_config_memsize,
+ .need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
+ .get_pcie_usage = &amdgpu_nbio_get_pcie_usage,
+ .need_reset_on_init = &soc15_need_reset_on_init,
+ .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
+ .supports_baco = &soc15_supports_baco,
+ .pre_asic_init = &soc15_pre_asic_init,
+ .query_video_codecs = &soc15_query_video_codecs,
+ .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
+};
+
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
@@ -866,6 +915,8 @@ static int soc15_common_early_init(void *handle)
adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
+ adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
+ adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
@@ -1094,9 +1145,18 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case IP_VERSION(9, 4, 3):
- adev->asic_funcs = &vega20_asic_funcs;
- adev->cg_flags = 0;
- adev->pg_flags = 0;
+ adev->asic_funcs = &aqua_vanjaram_asic_funcs;
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG |
+ AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG |
+ AMD_CG_SUPPORT_IH_CG;
+ adev->pg_flags =
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ adev->external_rev_id = adev->rev_id + 0x46;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index efc2a253e8db..eac54042c6c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -100,7 +100,7 @@ struct soc15_ras_field_entry {
#define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift)
void soc15_grbm_select(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 queue, u32 vmid);
+ u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id);
void soc15_set_virt_ops(struct amdgpu_device *adev);
void soc15_program_register_sequence(struct amdgpu_device *adev,
@@ -111,7 +111,11 @@ int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
+void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
+u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
+int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
void vega10_doorbell_index_init(struct amdgpu_device *adev);
void vega20_doorbell_index_init(struct amdgpu_device *adev);
+void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 9fefd403e14f..da683afa0222 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -24,105 +24,100 @@
#ifndef __SOC15_COMMON_H__
#define __SOC15_COMMON_H__
+/* GET_INST returns the physical instance corresponding to a logical instance */
+#define GET_INST(ip, inst) \
+ (adev->ip_map.logical_to_dev_inst ? \
+ adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst)
+#define GET_MASK(ip, mask) \
+ (adev->ip_map.logical_to_dev_mask ? \
+ adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask)
+
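
GET_INST()/GET_MASK() fall back to the identity mapping when no logical_to_dev_inst/logical_to_dev_mask hook is installed. A standalone sketch of the kind of remap such a hook performs (the table contents are invented for illustration):

/* Hypothetical logical->physical remap for one IP block: on a partitioned
 * part, logical SDMA instance 0 of a partition might live at physical
 * instance 4, and so on. A NULL table means identity, mirroring the
 * ternary in GET_INST(). */
static const int ex_remap_table[] = { 4, 5 };
static const int *ex_log_to_phys = ex_remap_table;

static inline int ex_get_inst(int logical)
{
	return ex_log_to_phys ? ex_log_to_phys[logical] : logical;
}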
/* Register Access Macros */
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+#define SOC15_REG_OFFSET1(ip, inst, reg, offset) \
+ (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)+(offset))
-#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
+#define __WREG32_SOC15_RLC__(reg, value, flag, hwip, inst) \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
- amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \
+ amdgpu_sriov_wreg(adev, reg, value, flag, hwip, inst) : \
WREG32(reg, value))
-#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
+#define __RREG32_SOC15_RLC__(reg, flag, hwip, inst) \
((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
- amdgpu_sriov_rreg(adev, reg, flag, hwip) : \
+ amdgpu_sriov_rreg(adev, reg, flag, hwip, inst) : \
RREG32(reg))
#define WREG32_FIELD15(ip, idx, reg, field, val) \
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
(__RREG32_SOC15_RLC__( \
adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
- 0, ip##_HWIP) & \
+ 0, ip##_HWIP, idx) & \
~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
- 0, ip##_HWIP)
+ 0, ip##_HWIP, idx)
#define WREG32_FIELD15_PREREG(ip, idx, reg_name, field, val) \
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \
(__RREG32_SOC15_RLC__( \
adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \
- 0, ip##_HWIP) & \
+ 0, ip##_HWIP, idx) & \
~REG_FIELD_MASK(reg_name, field)) | (val) << REG_FIELD_SHIFT(reg_name, field), \
- 0, ip##_HWIP)
+ 0, ip##_HWIP, idx)
#define RREG32_SOC15(ip, inst, reg) \
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
- 0, ip##_HWIP)
+ 0, ip##_HWIP, inst)
-#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
+#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0)
-#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0)
#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
- AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+ AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
- __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP)
+ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)) + \
+ (offset), 0, ip##_HWIP, inst)
#define WREG32_SOC15(ip, inst, reg, value) \
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
- value, 0, ip##_HWIP)
+ value, 0, ip##_HWIP, inst)
#define WREG32_SOC15_IP(ip, reg, value) \
- __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
+ __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0)
#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \
- __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0)
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
- value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
+ value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \
- value, 0, ip##_HWIP)
+ value, 0, ip##_HWIP, inst)
-#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \
-({ int ret = 0; \
- do { \
- uint32_t old_ = 0; \
- uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
- uint32_t loop = adev->usec_timeout; \
- ret = 0; \
- while ((tmp_ & (mask)) != (expected_value)) { \
- if (old_ != tmp_) { \
- loop = adev->usec_timeout; \
- old_ = tmp_; \
- } else \
- udelay(1); \
- tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
- loop--; \
- if (!loop) { \
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
- inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
- ret = -ETIMEDOUT; \
- break; \
- } \
- } \
- } while (0); \
- ret; \
-})
+#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \
+ amdgpu_device_wait_on_rreg(adev, inst, \
+ (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg)), \
+ #reg, expected_value, mask)
+
+#define SOC15_WAIT_ON_RREG_OFFSET(ip, inst, reg, offset, expected_value, mask) \
+ amdgpu_device_wait_on_rreg(adev, inst, \
+ (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + (reg) + (offset)), \
+ #reg, expected_value, mask)
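
The open-coded polling loop that SOC15_WAIT_ON_RREG used to expand to is now a call into amdgpu_device_wait_on_rreg(). A hedged, standalone sketch of the poll-until-match-or-timeout pattern it implements (helper names and the stubs are assumptions, not the kernel API):

#include <stdint.h>

/* Stubs standing in for RREG32() and udelay() so the sketch compiles. */
static uint32_t ex_read_reg(uint32_t offset) { (void)offset; return 0; }
static void ex_udelay(unsigned int usec) { (void)usec; }

/* Poll a register until (value & mask) == expected, restarting the timeout
 * window whenever the value changes. Returns 0 on success, -1 on timeout. */
static int ex_wait_on_reg(uint32_t offset, uint32_t expected, uint32_t mask,
			  unsigned int timeout_us)
{
	uint32_t old = 0, val = ex_read_reg(offset);
	unsigned int loop = timeout_us;

	while ((val & mask) != expected) {
		if (old != val) {
			loop = timeout_us;	/* progress seen: restart window */
			old = val;
		} else {
			ex_udelay(1);
		}
		val = ex_read_reg(offset);
		if (!--loop)
			return -1;
	}
	return 0;
}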
#define WREG32_RLC(reg, value) \
- __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP)
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP, 0)
-#define WREG32_RLC_EX(prefix, reg, value) \
+#define WREG32_RLC_EX(prefix, reg, value, inst) \
do { \
if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
- uint32_t r0 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \
- uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \
- uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \
+ uint32_t r0 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \
+ uint32_t r1 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \
+ uint32_t spare_int = adev->reg_offset[GC_HWIP][inst][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \
WREG32(r0, value); \
WREG32(r1, (reg | 0x80000000)); \
WREG32(spare_int, 0x1); \
@@ -141,26 +136,26 @@
/* shadow the registers in the callback function */
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
- __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP)
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP, inst)
/* for GC only */
#define RREG32_RLC(reg) \
__RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP)
#define WREG32_RLC_NO_KIQ(reg, value, hwip) \
- __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
+ __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)
#define RREG32_RLC_NO_KIQ(reg, hwip) \
- __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
+ __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)
#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
if (amdgpu_sriov_fullaccess(adev)) { \
- uint32_t r2 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \
- uint32_t r3 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \
- uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \
- uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \
+ uint32_t r2 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \
+ uint32_t r3 = adev->reg_offset[GC_HWIP][inst][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \
+ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][inst][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \
+ uint32_t grbm_idx = adev->reg_offset[GC_HWIP][inst][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \
if (target_reg == grbm_cntl) \
WREG32(r2, value); \
else if (target_reg == grbm_idx) \
@@ -172,31 +167,41 @@
} while (0)
#define RREG32_SOC15_RLC(ip, inst, reg) \
- __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP)
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP, inst)
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
do { \
- uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
- __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \
+ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
+ __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP, inst); \
} while (0)
#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
do { \
- uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
- WREG32_RLC_EX(prefix, target_reg, value); \
+ uint32_t target_reg = adev->reg_offset[GC_HWIP][inst][reg##_BASE_IDX] + reg;\
+ WREG32_RLC_EX(prefix, target_reg, value, inst); \
} while (0)
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
(__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
- AMDGPU_REGS_RLC, ip##_HWIP) & \
+ AMDGPU_REGS_RLC, ip##_HWIP, idx) & \
~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
- AMDGPU_REGS_RLC, ip##_HWIP)
+ AMDGPU_REGS_RLC, ip##_HWIP, idx)
#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
- __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP)
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP, inst)
#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
- __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP)
+ __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP, inst)
+
+/* inst equals ext for some IPs */
+#define RREG32_SOC15_EXT(ip, inst, reg, ext) \
+ RREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \
+ + adev->asic_funcs->encode_ext_smn_addressing(ext)) \
+
+#define WREG32_SOC15_EXT(ip, inst, reg, ext, value) \
+ WREG32_PCIE_EXT((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) * 4 \
+ + adev->asic_funcs->encode_ext_smn_addressing(ext), \
+ value) \
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index d77162536514..40d23738ee4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -48,33 +48,28 @@
static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
-{
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
-{
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
-{
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
-{
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = {
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
};
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
-{
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
@@ -82,22 +77,19 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
-{
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
-{
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 = {
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
-{
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
@@ -248,11 +240,6 @@ void soc21_grbm_select(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}
-static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
-{
- /* todo */
-}
-
static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
/* todo */
@@ -288,12 +275,12 @@ static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_n
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -450,8 +437,7 @@ static void soc21_program_aspm(struct amdgpu_device *adev)
adev->nbio.funcs->program_aspm(adev);
}
-const struct amdgpu_ip_block_version soc21_common_ip_block =
-{
+const struct amdgpu_ip_block_version soc21_common_ip_block = {
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
.minor = 0,
@@ -489,16 +475,6 @@ static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
-static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
-{
-
- /* TODO
- * dummy implement for pcie_replay_count sysfs interface
- * */
-
- return 0;
-}
-
static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
@@ -542,9 +518,9 @@ static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
bool enter)
{
if (enter)
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
else
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
@@ -552,14 +528,12 @@ static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
return 0;
}
-static const struct amdgpu_asic_funcs soc21_asic_funcs =
-{
+static const struct amdgpu_asic_funcs soc21_asic_funcs = {
.read_disabled_bios = &soc21_read_disabled_bios,
.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc21_read_register,
.reset = &soc21_asic_reset,
.reset_method = &soc21_asic_reset_method,
- .set_vga_state = &soc21_vga_set_state,
.get_xclk = &soc21_get_xclk,
.set_uvd_clocks = &soc21_set_uvd_clocks,
.set_vce_clocks = &soc21_set_vce_clocks,
@@ -567,7 +541,7 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs =
.init_doorbell_index = &soc21_init_doorbell_index,
.need_full_reset = &soc21_need_full_reset,
.need_reset_on_init = &soc21_need_reset_on_init,
- .get_pcie_replay_count = &soc21_get_pcie_replay_count,
+ .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
.query_video_codecs = &soc21_query_video_codecs,
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 30d0482ac466..879bb7af297c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -129,6 +129,8 @@ struct ta_ras_trigger_error_input {
struct ta_ras_init_flags {
uint8_t poison_mode_en;
uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
};
struct ta_ras_output_flags {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b08905d1c00f..917707bba7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -493,8 +493,7 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &tonga_ih_funcs;
}
-const struct amdgpu_ip_block_version tonga_ih_ip_block =
-{
+const struct amdgpu_ip_block_version tonga_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 3,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index d51ae0bc36f7..46bfdee79bfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -444,6 +444,11 @@ static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *ade
umc_v8_10_ecc_info_query_error_address, ras_error_status);
}
+static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr)
+{
+ hdr->version = RAS_TABLE_VER_V2_1;
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -457,4 +462,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
+ .set_eeprom_table_version = umc_v8_10_set_eeprom_table_version,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
index c6dfd433fec7..dc12e0af5451 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h
@@ -33,7 +33,8 @@
/* Total channel instances for all available umc nodes */
#define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
- (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc)
+ (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * \
+ (adev)->gmc.num_umc - hweight32((adev)->gmc.m_half_use) * 2)
/* UMC register per channel offset */
#define UMC_V8_10_PER_CHANNEL_OFFSET 0x400
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 0fef925b6602..5534c769b655 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -815,8 +815,7 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.set_powergating_state = uvd_v3_1_set_powergating_state,
};
-const struct amdgpu_ip_block_version uvd_v3_1_ip_block =
-{
+const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 3,
.minor = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e32b656b3dab..86d1d46e1e5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -444,7 +444,7 @@ static int uvd_v7_0_sw_init(void *handle)
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
@@ -455,7 +455,7 @@ static int uvd_v7_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
@@ -679,11 +679,11 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
i == 0 ?
- adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
i == 0 ?
- adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
@@ -1908,8 +1908,7 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
-{
+const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 7,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8def62c83ffd..18f6e62af339 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -998,8 +998,7 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
};
-const struct amdgpu_ip_block_version vce_v3_0_ip_block =
-{
+const struct amdgpu_ip_block_version vce_v3_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 0,
@@ -1007,8 +1006,7 @@ const struct amdgpu_ip_block_version vce_v3_0_ip_block =
.funcs = &vce_v3_0_ip_funcs,
};
-const struct amdgpu_ip_block_version vce_v3_1_ip_block =
-{
+const struct amdgpu_ip_block_version vce_v3_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 1,
@@ -1016,8 +1014,7 @@ const struct amdgpu_ip_block_version vce_v3_1_ip_block =
.funcs = &vce_v3_0_ip_funcs,
};
-const struct amdgpu_ip_block_version vce_v3_4_ip_block =
-{
+const struct amdgpu_ip_block_version vce_v3_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 4,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 57b85bb6a1e4..e0b70cd3b697 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,7 +466,7 @@ static int vce_v4_0_sw_init(void *handle)
enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
ring = &adev->vce.ring[i];
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vce%d", i);
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 761c28fa6ec1..25ba27151ac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -120,7 +120,7 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
ring = &adev->vcn.inst->ring_dec;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -142,7 +142,7 @@ static int vcn_v1_0_sw_init(void *handle)
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst->ring_enc[i];
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
hw_prio, NULL);
@@ -211,7 +211,7 @@ static int vcn_v1_0_hw_init(void *handle)
goto done;
}
- ring = &adev->jpeg.inst->ring_dec;
+ ring = adev->jpeg.inst->ring_dec;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -473,7 +473,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
- data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
@@ -1304,7 +1304,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
/* Restore */
- ring = &adev->jpeg.inst->ring_dec;
+ ring = adev->jpeg.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
@@ -1772,7 +1772,7 @@ static int vcn_v1_0_set_powergating_state(void *handle,
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if(state == adev->vcn.cur_state)
+ if (state == adev->vcn.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
@@ -1780,7 +1780,7 @@ static int vcn_v1_0_set_powergating_state(void *handle,
else
ret = vcn_v1_0_start(adev);
- if(!ret)
+ if (!ret)
adev->vcn.cur_state = state;
return ret;
}
@@ -1802,7 +1802,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -1810,7 +1810,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
adev->vcn.pause_dpg_mode(adev, 0, &new_state);
}
- fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+ fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
if (fences == 0) {
@@ -1832,7 +1832,7 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
- if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
+ if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
@@ -1864,7 +1864,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+ if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -2065,8 +2065,7 @@ static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}
-const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
-{
+const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCN,
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 7c2b3aa48083..18794394c5a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -129,7 +129,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
@@ -160,7 +160,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
if (!amdgpu_sriov_vf(adev))
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
else
@@ -881,9 +881,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
- psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
+ amdgpu_vcn_psp_update_sram(adev, 0, 0);
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ab0b45d0ead1..6fbea38f4d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle)
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
- VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
+ VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
}
@@ -188,9 +188,9 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
- ring->vm_hub = AMDGPU_MMHUB_1;
+ ring->vm_hub = AMDGPU_MMHUB1(0);
else
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
@@ -208,9 +208,9 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
- ring->vm_hub = AMDGPU_MMHUB_1;
+ ring->vm_hub = AMDGPU_MMHUB1(0);
else
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
@@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle)
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
@@ -909,9 +912,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
- psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1807,6 +1808,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1837,9 +1846,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
break;
- case VCN_2_6__SRCID_UVD_POISON:
- amdgpu_vcn_process_poison_irq(adev, source, entry);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -1854,6 +1860,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
.process = vcn_v2_5_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
+ .set = vcn_v2_6_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1863,6 +1874,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
continue;
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
+
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
@@ -1965,6 +1979,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v2_6_ras = {
.ras_block = {
.hw_ops = &vcn_v2_6_ras_hw_ops,
+ .ras_late_init = amdgpu_vcn_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3eab186261aa..a61ecefdafc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -189,7 +189,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
@@ -213,7 +213,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
hw_prio, &adev->vcn.inst[i].sched_score);
@@ -1037,9 +1037,7 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
if (indirect)
- psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1107,7 +1105,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG){
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
continue;
}
@@ -1313,7 +1311,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
- for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
+ for (i = 0; i < MMSCH_V3_0_VCN_INSTANCES; i++) {
header.inst[i].init_status = 0;
header.inst[i].table_offset = 0;
header.inst[i].table_size = 0;
@@ -1791,7 +1789,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
struct amdgpu_bo *bo;
uint64_t start, end;
unsigned int i;
- void * ptr;
+ void *ptr;
int r;
addr &= AMDGPU_GMC_HOLE_MASK;
@@ -2097,7 +2095,7 @@ static int vcn_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2131,7 +2129,7 @@ static int vcn_v3_0_set_powergating_state(void *handle,
return 0;
}
- if(state == adev->vcn.cur_state)
+ if (state == adev->vcn.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
@@ -2139,7 +2137,7 @@ static int vcn_v3_0_set_powergating_state(void *handle,
else
ret = vcn_v3_0_start(adev);
- if(!ret)
+ if (!ret)
adev->vcn.cur_state = state;
return ret;
@@ -2230,8 +2228,7 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.set_powergating_state = vcn_v3_0_set_powergating_state,
};
-const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
-{
+const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCN,
.major = 3,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index bf0674039598..29164289c5f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -129,7 +129,11 @@ static int vcn_v4_0_sw_init(void *handle)
if (adev->vcn.harvest_config & (1 << i))
continue;
- atomic_set(&adev->vcn.inst[i].sched_score, 0);
+ /* Init instance 0 sched_score to 1, so it's scheduled after other instances */
+ if (i == 0)
+ atomic_set(&adev->vcn.inst[i].sched_score, 1);
+ else
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
@@ -139,7 +143,7 @@ static int vcn_v4_0_sw_init(void *handle)
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
if (r)
return r;
@@ -149,7 +153,7 @@ static int vcn_v4_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
- ring->vm_hub = AMDGPU_MMHUB_0;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_unified_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -165,6 +169,12 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) {
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+ }
+
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
@@ -305,8 +315,8 @@ static int vcn_v4_0_hw_fini(void *handle)
vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
-
- amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
@@ -989,9 +999,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
if (indirect)
- psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
- (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
- (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1131,11 +1139,11 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
if (status & 2)
break;
mdelay(10);
- if (amdgpu_emu_mode==1)
+ if (amdgpu_emu_mode == 1)
msleep(1);
}
- if (amdgpu_emu_mode==1) {
+ if (amdgpu_emu_mode == 1) {
r = -1;
if (status & 2) {
r = 0;
@@ -1239,7 +1247,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
- for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
+ for (i = 0; i < MMSCH_V4_0_VCN_INSTANCES; i++) {
header.inst[i].init_status = 0;
header.inst[i].table_offset = 0;
header.inst[i].table_size = 0;
@@ -1420,8 +1428,10 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
*/
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
+ struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
+ vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
@@ -1794,7 +1804,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
-static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
+static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
@@ -1839,7 +1849,11 @@ static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs;
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2))
+ vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;
+
+ adev->vcn.inst[i].ring_enc[0].funcs =
+ (const struct amdgpu_ring_funcs *)&vcn_v4_0_unified_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[0].me = i;
DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
@@ -1904,7 +1918,7 @@ static int vcn_v4_0_wait_for_idle(void *handle)
static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1945,7 +1959,7 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
return 0;
}
- if(state == adev->vcn.cur_state)
+ if (state == adev->vcn.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
@@ -1953,7 +1967,7 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
else
ret = vcn_v4_0_start(adev);
- if(!ret)
+ if (!ret)
adev->vcn.cur_state = state;
return ret;
@@ -1976,6 +1990,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp
}
/**
+ * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block RAS interrupt state
+ */
+static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+/**
* vcn_v4_0_process_interrupt - process VCN block interrupt
*
* @adev: amdgpu_device pointer
@@ -2007,9 +2039,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
- case VCN_4_0__SRCID_UVD_POISON:
- amdgpu_vcn_process_poison_irq(adev, source, entry);
- break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@@ -2024,6 +2053,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
.process = vcn_v4_0_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
+ .set = vcn_v4_0_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -2041,6 +2075,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
+
+ adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
}
}
@@ -2064,8 +2101,7 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.set_powergating_state = vcn_v4_0_set_powergating_state,
};
-const struct amdgpu_ip_block_version vcn_v4_0_ip_block =
-{
+const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCN,
.major = 4,
.minor = 0,
@@ -2114,6 +2150,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v4_0_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_ras_hw_ops,
+ .ras_late_init = amdgpu_vcn_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
new file mode 100644
index 000000000000..f85d18cd74ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -0,0 +1,1767 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_hw_ip.h"
+#include "vcn_v2_0.h"
+#include "mmsch_v4_0_3.h"
+
+#include "vcn/vcn_4_0_3_offset.h"
+#include "vcn/vcn_4_0_3_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+
+#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL
+#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX
+#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA
+#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
+
+#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
+#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+
+static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
+static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v4_0_3_set_powergating_state(void *handle,
+ enum amd_powergating_state state);
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
+ int inst_idx, struct dpg_pause_state *new_state);
+static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
+static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
+static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
+ int inst_idx, bool indirect);
+/**
+ * vcn_v4_0_3_early_init - set function pointers
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Set ring and irq function pointers
+ */
+static int vcn_v4_0_3_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* re-use enc ring as unified ring */
+ adev->vcn.num_enc_rings = 1;
+
+ vcn_v4_0_3_set_unified_ring_funcs(adev);
+ vcn_v4_0_3_set_irq_funcs(adev);
+ vcn_v4_0_3_set_ras_funcs(adev);
+
+ return amdgpu_vcn_early_init(adev);
+}
+
+/**
+ * vcn_v4_0_3_sw_init - sw init for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Load firmware and perform sw initialization
+ */
+static int vcn_v4_0_3_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev);
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ /* VCN DEC TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ vcn_inst = GET_INST(VCN, i);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->use_doorbell = true;
+
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * vcn_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 32 * vcn_inst;
+
+ ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
+ sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT,
+ &adev->vcn.inst[i].sched_score);
+ if (r)
+ return r;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = true;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_sw_fini - sw fini for VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Suspend VCN and free up sw allocation
+ */
+static int vcn_v4_0_3_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r, idx;
+
+ if (drm_dev_enter(&adev->ddev, &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sq.is_enabled = cpu_to_le32(false);
+ }
+ drm_dev_exit(idx);
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v4_0_3_hw_init - start and test VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v4_0_3_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v4_0_3_start_sriov(adev);
+ if (r)
+ goto done;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_3_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ } else {
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(
+ adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * vcn_inst,
+ adev->vcn.inst[i].aid_id);
+
+ WREG32_SOC15(
+ VCN, GET_INST(VCN, ring->me),
+ regVCN_RB1_DB_CTRL,
+ ring->doorbell_index
+ << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(
+ VCN, GET_INST(VCN, ring->me),
+ regVCN_RB1_DB_CTRL);
+ }
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+ }
+ }
+
+done:
+ if (!r)
+ DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
+
+ return r;
+}
+
+/**
+ * vcn_v4_0_3_hw_fini - stop the hardware block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Stop the VCN block, mark ring as not ready any more
+ */
+static int vcn_v4_0_3_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
+ vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_suspend - suspend VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v4_0_3_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = vcn_v4_0_3_hw_fini(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v4_0_3_resume - resume VCN block
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v4_0_3_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v4_0_3_hw_init(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v4_0_3_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t offset, size, vcn_inst;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(
+ VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
+ .tmr_mc_addr_lo));
+ WREG32_SOC15(
+ VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
+ .tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
+ AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(
+ VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
+ WREG32_SOC15(
+ VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(
+ VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+}
+
+/**
+ * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Let the VCN memory controller know its offsets with dpg mode
+ */
+static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
+/**
+ * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t data;
+ int vcn_inst;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ return;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* VCN disable CGC */
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
+ data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
+ data &= ~(UVD_CGC_GATE__SYS_MASK
+ | UVD_CGC_GATE__MPEG2_MASK
+ | UVD_CGC_GATE__REGS_MASK
+ | UVD_CGC_GATE__RBC_MASK
+ | UVD_CGC_GATE__LMI_MC_MASK
+ | UVD_CGC_GATE__LMI_UMC_MASK
+ | UVD_CGC_GATE__MPC_MASK
+ | UVD_CGC_GATE__LBSI_MASK
+ | UVD_CGC_GATE__LRBBM_MASK
+ | UVD_CGC_GATE__WCB_MASK
+ | UVD_CGC_GATE__VCPU_MASK
+ | UVD_CGC_GATE__MMSCH_MASK);
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);
+
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
+ data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK
+ | UVD_CGC_CTRL__MMSCH_MODE_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
+ data |= (UVD_SUVD_CGC_GATE__SRE_MASK
+ | UVD_SUVD_CGC_GATE__SIT_MASK
+ | UVD_SUVD_CGC_GATE__SMP_MASK
+ | UVD_SUVD_CGC_GATE__SCM_MASK
+ | UVD_SUVD_CGC_GATE__SDB_MASK
+ | UVD_SUVD_CGC_GATE__SRE_H264_MASK
+ | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SIT_H264_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SCM_H264_MASK
+ | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_H264_MASK
+ | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
+ | UVD_SUVD_CGC_GATE__ENT_MASK
+ | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SITE_MASK
+ | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
+ | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
+ | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
+ | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);
+
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
+ data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
+}
+
+/**
+ * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @sram_sel: sram select
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Disable clock gating for VCN block with dpg mode
+ */
+static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
+ int inst_idx, uint8_t indirect)
+{
+ uint32_t reg_data = 0;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ return;
+
+ /* enable sw clock gating control */
+ reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+
+ /* turn off clock gating */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);
+
+ /* turn on SUVD clock gating */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+
+ /* turn on sw mode in UVD_SUVD_CGC_CTRL */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+}
+
+/**
+ * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t data;
+ int vcn_inst;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
+ return;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* enable VCN CGC */
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
+ data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
+ data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
+ data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
+ data |= (UVD_CGC_CTRL__SYS_MODE_MASK
+ | UVD_CGC_CTRL__MPEG2_MODE_MASK
+ | UVD_CGC_CTRL__REGS_MODE_MASK
+ | UVD_CGC_CTRL__RBC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_MC_MODE_MASK
+ | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
+ | UVD_CGC_CTRL__MPC_MODE_MASK
+ | UVD_CGC_CTRL__LBSI_MODE_MASK
+ | UVD_CGC_CTRL__LRBBM_MODE_MASK
+ | UVD_CGC_CTRL__WCB_MODE_MASK
+ | UVD_CGC_CTRL__VCPU_MODE_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
+
+ data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
+ data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
+ | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
+}
+
+/**
+ * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+ uint32_t tmp;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
+
+ if (indirect) {
+ DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
+ inst_idx, adev->vcn.inst[inst_idx].aid_id);
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+ /* Use dummy register 0xDEADBEEF to pass the AID selection to the PSP FW */
+ WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
+ adev->vcn.inst[inst_idx].aid_id, 0, true);
+ }
+
+ /* disable clock gating */
+ vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+
+ /* enable VCPU clock and hold the VCPU in block reset */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MPC_CNTL),
+ 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
+
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MPC_SET_MUXA0),
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MPC_SET_MUXB0),
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
+
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MPC_SET_MUX),
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
+
+ vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
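+ /* release the VCPU block reset while keeping the VCPU clock enabled */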
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
+
+ vcn_v4_0_3_enable_ras(adev, inst_idx, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
+ upper_32_bits(ring->gpu_addr));
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
+ ring->ring_size / sizeof(uint32_t));
+
+ /* resetting ring, fw should not check RB ring */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ /* resetting done, fw can check RB ring */
+ fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+ return 0;
+}
+
+static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v4_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v4_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v4_0_cmd_end end = { {0} };
+ struct mmsch_v4_0_3_init_header header;
+
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ vcn_inst = GET_INST(VCN, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
+
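+ /* the register programming table is placed right after the header in the table buffer */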
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
+ MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
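+ /* VCPU cache window 0 covers the page-aligned firmware image */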
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+
+ offset = 0;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET1), 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET2), 0);
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
+
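+ /* describe the encode ring to the firmware through the shared memory area */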
+ fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ MMSCH_V4_0_INSERT_END();
+
+ header.vcn0.init_status = 0;
+ header.vcn0.table_offset = header.total_size;
+ header.vcn0.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Send init table to mmsch */
+ size = sizeof(struct mmsch_v4_0_3_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
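+ /* notify MMSCH that the init descriptor table is ready */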
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
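+ /* poll the response mailbox, 10 usec per step, up to ~1 msec */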
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v4_0_3_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_start - VCN start
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start VCN block
+ */
+static int vcn_v4_0_3_start(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_ring *ring;
+ int i, j, k, r, vcn_inst;
+ uint32_t tmp;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ vcn_inst = GET_INST(VCN, i);
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
+ UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_3_disable_clock_gating(adev, i);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK,
+ ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
+ tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_3_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
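+ /* wait for the VCPU to report it is running, retrying with a block reset up to 10 times */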
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst,
+ regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_DEV_ERROR(adev->dev,
+ "VCN decode not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
+ regUVD_VCPU_CNTL),
+ 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+
+ /* program the RB_BASE for ring buffer */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
+ lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
+ upper_32_bits(ring->gpu_addr));
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
+ ring->ring_size / sizeof(uint32_t));
+
+ /* resetting ring, fw should not check RB ring */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ fw_shared->sq.queue_mode &=
+ cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
+
+ }
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop VCN block with dpg mode
+ */
+static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t tmp;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
+
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_stop - VCN stop
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop VCN block
+ */
+static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ int i, r = 0, vcn_inst;
+ uint32_t tmp;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_3_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
+ UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto Done;
+
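+ /* wait for the LMI read and write channels to drain */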
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
+
+ /* stall UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
+ tmp);
+ if (r)
+ goto Done;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* reset LMI UMC/LMI/VCPU */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* clear VCN status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+
+ /* apply HW clock gating */
+ vcn_v4_0_3_enable_clock_gating(adev, i);
+ }
+Done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @new_state: pause state
+ *
+ * Pause dpg mode for VCN block
+ */
+static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
+ struct dpg_pause_state *new_state)
+{
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified read pointer
+ */
+static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
+}
+
+/**
+ * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified write pointer
+ */
+static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
+ regUVD_RB_WPTR);
+}
+
+/**
+ * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ }
+}
+
+static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
+ .get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
+ .set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_unified_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
+ adev->vcn.inst[i].ring_enc[0].me = i;
+ vcn_inst = GET_INST(VCN, i);
+ adev->vcn.inst[i].aid_id =
+ vcn_inst / adev->vcn.num_inst_per_aid;
+ }
+ DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
+}
+
+/**
+ * vcn_v4_0_3_is_idle - check VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v4_0_3_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
+ UVD_STATUS__IDLE);
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v4_0_3_wait_for_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
+ UVD_STATUS__IDLE, UVD_STATUS__IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v4_0_3_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = state == AMD_CG_STATE_GATE;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (enable) {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, i),
+ regUVD_STATUS) != UVD_STATUS__IDLE)
+ return -EBUSY;
+ vcn_v4_0_3_enable_clock_gating(adev, i);
+ } else {
+ vcn_v4_0_3_disable_clock_gating(adev, i);
+ }
+ }
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
+ *
+ * @handle: amdgpu_device pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v4_0_3_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ /* For SRIOV, the guest should not control VCN power-gating;
+ * the MMSCH FW controls both power-gating and clock-gating,
+ * so the guest must avoid touching CGC and PG.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v4_0_3_stop(adev);
+ else
+ ret = vcn_v4_0_3_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+/**
+ * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block interrupt state
+ */
+static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+/**
+ * vcn_v4_0_3_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t i, inst;
+
+ i = node_id_to_phys_map[entry->node_id];
+
+ DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
+
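+ /* find the VCN instance located on the AID that raised this interrupt */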
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
+ if (adev->vcn.inst[inst].aid_id == i)
+ break;
+
+ if (inst >= adev->vcn.num_vcn_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown VCN instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
+ .set = vcn_v4_0_3_set_interrupt_state,
+ .process = vcn_v4_0_3_process_interrupt,
+};
+
+/**
+ * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
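+ /* all VCN instances share the IRQ source of the first instance */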
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ adev->vcn.inst->irq.num_types++;
+ adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+}
+
+static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
+ .name = "vcn_v4_0_3",
+ .early_init = vcn_v4_0_3_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v4_0_3_sw_init,
+ .sw_fini = vcn_v4_0_3_sw_fini,
+ .hw_init = vcn_v4_0_3_hw_init,
+ .hw_fini = vcn_v4_0_3_hw_fini,
+ .suspend = vcn_v4_0_3_suspend,
+ .resume = vcn_v4_0_3_resume,
+ .is_idle = vcn_v4_0_3_is_idle,
+ .wait_for_idle = vcn_v4_0_3_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
+ .set_powergating_state = vcn_v4_0_3_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 4,
+ .minor = 0,
+ .rev = 3,
+ .funcs = &vcn_v4_0_3_ip_funcs,
+};
+
+static const struct amdgpu_ras_err_status_reg_entry vcn_v4_0_3_ue_reg_list[] = {
+ {AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDD, regVCN_UE_ERR_STATUS_HI_VIDD),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDD"},
+ {AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDV, regVCN_UE_ERR_STATUS_HI_VIDV),
+ 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDV"},
+};
+
+static void vcn_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t vcn_inst,
+ void *ras_err_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
+
+ /* vcn v4_0_3 only supports querying uncorrectable errors */
+ amdgpu_ras_inst_query_ras_error_count(adev,
+ vcn_v4_0_3_ue_reg_list,
+ ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
+ NULL, 0, GET_INST(VCN, vcn_inst),
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ &err_data->ue_count);
+}
+
+static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_err_status)
+{
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ dev_warn(adev->dev, "VCN RAS is not supported\n");
+ return;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
+}
+
+static void vcn_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ uint32_t vcn_inst)
+{
+ amdgpu_ras_inst_reset_ras_error_count(adev,
+ vcn_v4_0_3_ue_reg_list,
+ ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
+ GET_INST(VCN, vcn_inst));
+}
+
+static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ dev_warn(adev->dev, "VCN RAS is not supported\n");
+ return;
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
+}
+
+static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
+ .query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
+ .reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+};
+
+static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
+ .ras_block = {
+ .hw_ops = &vcn_v4_0_3_ras_hw_ops,
+ },
+};
+
+static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ras = &vcn_v4_0_3_ras;
+}
+
+static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
+ int inst_idx, bool indirect)
+{
+ uint32_t tmp;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ return;
+
+ tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
+ VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
+ VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
+ VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
+ WREG32_SOC15_DPG_MODE(inst_idx,
+ SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
+ tmp, 0, indirect);
+
+ tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
+ WREG32_SOC15_DPG_MODE(inst_idx,
+ SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
+ tmp, 0, indirect);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
new file mode 100644
index 000000000000..0b046114373a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_V4_0_3_H__
+#define __VCN_V4_0_3_H__
+
+extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
+
+#endif /* __VCN_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1e83db0c5438..d364c6dd152c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -485,7 +485,7 @@ static int vega10_ih_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, true);
if (r)
return r;
@@ -510,7 +510,7 @@ static int vega10_ih_sw_init(void *handle)
/* initialize ih control registers offset */
vega10_ih_init_register_offset(adev);
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 536128447b71..dbc99536440f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -334,7 +334,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index));
/* Enable IH Retry CAM */
- if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0))
+ if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0) ||
+ adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))
WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
ENABLE, 1);
else
@@ -499,7 +500,8 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
case 2:
schedule_work(&adev->irq.ih2_work);
break;
- default: break;
+ default:
+ break;
}
return 0;
}
@@ -526,6 +528,7 @@ static int vega20_ih_early_init(void *handle)
static int vega20_ih_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool use_bus_addr = true;
int r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
@@ -533,14 +536,18 @@ static int vega20_ih_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true);
+ if ((adev->flags & AMD_IS_APU) &&
+ (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)))
+ use_bus_addr = false;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, use_bus_addr);
if (r)
return r;
@@ -559,7 +566,7 @@ static int vega20_ih_sw_init(void *handle)
/* initialize ih control registers offset */
vega20_ih_init_register_offset(adev);
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, use_bus_addr);
if (r)
return r;
@@ -704,8 +711,7 @@ static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev)
adev->irq.ih_funcs = &vega20_ih_funcs;
}
-const struct amdgpu_ip_block_version vega20_ih_ip_block =
-{
+const struct amdgpu_ip_block_version vega20_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 4,
.minor = 2,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 531f173ade2d..6a8494f98d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- if (adev->flags & AMD_IS_APU)
- return reference_clock;
+ if (adev->flags & AMD_IS_APU) {
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ /* vbios says 48Mhz, but the actual freq is 100Mhz */
+ return 10000;
+ default:
+ return reference_clock;
+ }
+ }
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
@@ -580,11 +587,6 @@ void vi_srbm_select(struct amdgpu_device *adev,
WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
-static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
-{
- /* todo */
-}
-
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
u32 bus_cntl;
@@ -762,12 +764,12 @@ static uint32_t vi_get_register_value(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
@@ -1435,7 +1437,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
.reset_method = &vi_asic_reset_method,
- .set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 93bd4eda0d94..d3c3d3ab7225 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -6,7 +6,6 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
- imply AMD_IOMMU_V2 if X86_64
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index e758c2a24cd0..a5ae7bcf44eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -53,13 +53,11 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_events.o \
$(AMDKFD_PATH)/cik_event_interrupt.o \
$(AMDKFD_PATH)/kfd_int_process_v9.o \
+ $(AMDKFD_PATH)/kfd_int_process_v10.o \
$(AMDKFD_PATH)/kfd_int_process_v11.o \
$(AMDKFD_PATH)/kfd_smi_events.o \
- $(AMDKFD_PATH)/kfd_crat.o
-
-ifneq ($(CONFIG_AMD_IOMMU_V2),)
-AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o
-endif
+ $(AMDKFD_PATH)/kfd_crat.o \
+ $(AMDKFD_PATH)/kfd_debug.o
ifneq ($(CONFIG_DEBUG_FS),)
AMDKFD_FILES += $(AMDKFD_PATH)/kfd_debugfs.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 5c8023cba196..795382b55e0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -26,7 +26,7 @@
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
-static bool cik_event_interrupt_isr(struct kfd_dev *dev,
+static bool cik_event_interrupt_isr(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
@@ -85,7 +85,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
!amdgpu_no_queue_eviction_on_vm_fault);
}
-static void cik_event_interrupt_wq(struct kfd_dev *dev,
+static void cik_event_interrupt_wq(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
const struct cik_ih_ring_entry *ihre =
@@ -118,9 +118,9 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
return;
if (info.vmid == vmid)
- kfd_signal_vm_fault_event(dev, pasid, &info);
+ kfd_signal_vm_fault_event(dev, pasid, &info, NULL);
else
- kfd_signal_vm_fault_event(dev, pasid, NULL);
+ kfd_signal_vm_fault_event(dev, pasid, NULL, NULL);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 73ca9aebf086..d7cd5fa313ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,16 +274,16 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820254,
+ 0xbf820001, 0xbf820258,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
0xbf840009, 0x866eff6d,
0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf850051, 0xbf8e0010,
+ 0xbf850055, 0xbf8e0010,
0xb8fbf803, 0xbf82fffa,
- 0x866eff7b, 0x00000900,
+ 0x866eff7b, 0x03c00900,
0xbf850015, 0x866eff7b,
0x000071ff, 0xbf840008,
0x866fff7b, 0x00007080,
@@ -294,13 +294,15 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xbf850007, 0xb8eef801,
0x866eff6e, 0x00000800,
0xbf850003, 0x866eff7b,
- 0x00000400, 0xbf850036,
+ 0x00000400, 0xbf85003a,
0xb8faf807, 0x867aff7a,
0x001f8000, 0x8e7a8b7a,
0x8977ff77, 0xfc000000,
0x87777a77, 0xba7ff807,
0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
0xc0031bbd, 0x00000010,
0xbf8cc07f, 0x8e6e976e,
0x8977ff77, 0x00800000,
@@ -676,14 +678,14 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
- 0xbf820001, 0xbf8201f1,
+ 0xbf820001, 0xbf8201f5,
0xb0804004, 0xb978f802,
0x8a78ff78, 0x00020006,
0xb97bf803, 0x876eff78,
0x00002000, 0xbf840009,
0x876eff6d, 0x00ff0000,
0xbf85001e, 0x876eff7b,
- 0x00000400, 0xbf850057,
+ 0x00000400, 0xbf85005b,
0xbf8e0010, 0xb97bf803,
0xbf82fffa, 0x876eff7b,
0x00000900, 0xbf850015,
@@ -697,7 +699,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb96ef801, 0x876eff6e,
0x00000800, 0xbf850003,
0x876eff7b, 0x00000400,
- 0xbf85003c, 0x8a77ff77,
+ 0xbf850040, 0x8a77ff77,
0xff000000, 0xb97af807,
0x877bff7a, 0x02000000,
0x8f7b867b, 0x88777b77,
@@ -706,6 +708,8 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x8a7aff7a, 0x023f8000,
0xb9faf807, 0xb97af812,
0xb97bf813, 0x8ffa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x887bff7b, 0xffff0000,
0xf4011bbd, 0xfa000010,
0xbf8cc07f, 0x8f6e976e,
0x8a77ff77, 0x00800000,
@@ -1094,16 +1098,16 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202d0,
+ 0xbf820001, 0xbf8202d4,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
0xbf840009, 0x866eff6d,
0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf850051, 0xbf8e0010,
+ 0xbf850055, 0xbf8e0010,
0xb8fbf803, 0xbf82fffa,
- 0x866eff7b, 0x00000900,
+ 0x866eff7b, 0x03c00900,
0xbf850015, 0x866eff7b,
0x000071ff, 0xbf840008,
0x866fff7b, 0x00007080,
@@ -1114,13 +1118,15 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbf850007, 0xb8eef801,
0x866eff6e, 0x00000800,
0xbf850003, 0x866eff7b,
- 0x00000400, 0xbf850036,
+ 0x00000400, 0xbf85003a,
0xb8faf807, 0x867aff7a,
0x001f8000, 0x8e7a8b7a,
0x8977ff77, 0xfc000000,
0x87777a77, 0xba7ff807,
0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
0xc0031bbd, 0x00000010,
0xbf8cc07f, 0x8e6e976e,
0x8977ff77, 0x00800000,
@@ -1572,16 +1578,16 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202db,
+ 0xbf820001, 0xbf8202df,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
0xbf840009, 0x866eff6d,
0x00ff0000, 0xbf85001e,
0x866eff7b, 0x00000400,
- 0xbf850051, 0xbf8e0010,
+ 0xbf850055, 0xbf8e0010,
0xb8fbf803, 0xbf82fffa,
- 0x866eff7b, 0x00000900,
+ 0x866eff7b, 0x03c00900,
0xbf850015, 0x866eff7b,
0x000071ff, 0xbf840008,
0x866fff7b, 0x00007080,
@@ -1592,13 +1598,15 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xbf850007, 0xb8eef801,
0x866eff6e, 0x00000800,
0xbf850003, 0x866eff7b,
- 0x00000400, 0xbf850036,
+ 0x00000400, 0xbf85003a,
0xb8faf807, 0x867aff7a,
0x001f8000, 0x8e7a8b7a,
0x8977ff77, 0xfc000000,
0x87777a77, 0xba7ff807,
0x00000000, 0xb8faf812,
0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
0xc0031bbd, 0x00000010,
0xbf8cc07f, 0x8e6e976e,
0x8977ff77, 0x00800000,
@@ -2061,14 +2069,14 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
- 0xbf820001, 0xbf82021c,
+ 0xbf820001, 0xbf820220,
0xb0804004, 0xb978f802,
0x8a78ff78, 0x00020006,
0xb97bf803, 0x876eff78,
0x00002000, 0xbf840009,
0x876eff6d, 0x00ff0000,
0xbf85001e, 0x876eff7b,
- 0x00000400, 0xbf850041,
+ 0x00000400, 0xbf850045,
0xbf8e0010, 0xb97bf803,
0xbf82fffa, 0x876eff7b,
0x00000900, 0xbf850015,
@@ -2082,8 +2090,10 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb96ef801, 0x876eff6e,
0x00000800, 0xbf850003,
0x876eff7b, 0x00000400,
- 0xbf850026, 0xb97af812,
+ 0xbf85002a, 0xb97af812,
0xb97bf813, 0x8ffa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x887bff7b, 0xffff0000,
0xf4011bbd, 0xfa000010,
0xbf8cc07f, 0x8f6e976e,
0x8a77ff77, 0x00800000,
@@ -2494,8 +2504,9 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+
static const uint32_t cwsr_trap_gfx11_hex[] = {
- 0xbfa00001, 0xbfa00221,
+ 0xbfa00001, 0xbfa00225,
0xb0804006, 0xb8f8f802,
0x9178ff78, 0x00020006,
0xb8fbf803, 0xbf0d9e6d,
@@ -2505,7 +2516,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xbfa10009, 0x8b6eff6d,
0x00ff0000, 0xbfa2001e,
0x8b6eff7b, 0x00000400,
- 0xbfa20041, 0xbf830010,
+ 0xbfa20045, 0xbf830010,
0xb8fbf803, 0xbfa0fffa,
0x8b6eff7b, 0x00000900,
0xbfa20015, 0x8b6eff7b,
@@ -2518,9 +2529,11 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xbfa20007, 0xb8eef801,
0x8b6eff6e, 0x00000800,
0xbfa20003, 0x8b6eff7b,
- 0x00000400, 0xbfa20026,
+ 0x00000400, 0xbfa2002a,
0xbefa4d82, 0xbf89fc07,
- 0x84fa887a, 0xf4005bbd,
+ 0x84fa887a, 0xbf0d8f7b,
+ 0xbfa10002, 0x8c7bff7b,
+ 0xffff0000, 0xf4005bbd,
0xf8000010, 0xbf89fc07,
0x846e976e, 0x9177ff77,
0x00800000, 0x8c776e77,
@@ -2938,211 +2951,257 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
};
static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
- 0xbf820001, 0xbf8202d6,
- 0xb8f8f802, 0x89788678,
- 0xb8fbf803, 0x866eff78,
- 0x00002000, 0xbf840009,
- 0x866eff6d, 0x00ff0000,
- 0xbf85001a, 0x866eff7b,
- 0x00000400, 0xbf85004d,
- 0xbf8e0010, 0xb8fbf803,
- 0xbf82fffa, 0x866eff7b,
- 0x03c00900, 0xbf850011,
- 0x866eff7b, 0x000071ff,
- 0xbf840008, 0x866fff7b,
- 0x00007080, 0xbf840001,
- 0xbeee1a87, 0xb8eff801,
- 0x8e6e8c6e, 0x866e6f6e,
- 0xbf850006, 0x866eff6d,
- 0x00ff0000, 0xbf850003,
+ 0xbf820001, 0xbf8202db,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
+ 0x866eff78, 0x00002000,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001a,
0x866eff7b, 0x00000400,
- 0xbf850036, 0xb8faf807,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x03c00900,
+ 0xbf850011, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf850006,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf85003a,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8979ff79, 0x00800000,
+ 0x87796e79, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f7a8b79,
0x867aff7a, 0x001f8000,
- 0x8e7a8b7a, 0x8979ff79,
- 0xfc000000, 0x87797a79,
- 0xba7ff807, 0x00000000,
- 0xb8faf812, 0xb8fbf813,
- 0x8efa887a, 0xc0031bbd,
- 0x00000010, 0xbf8cc07f,
- 0x8e6e976e, 0x8979ff79,
- 0x00800000, 0x87796e79,
- 0xc0071bbd, 0x00000000,
- 0xbf8cc07f, 0xc0071ebd,
- 0x00000008, 0xbf8cc07f,
- 0x86ee6e6e, 0xbf840001,
- 0xbe801d6e, 0x866eff6d,
- 0x01ff0000, 0xbf850005,
- 0x8778ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbf820005, 0x866eff6d,
- 0x01000000, 0xbf850002,
- 0x806c846c, 0x826d806d,
+ 0xb97af807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
- 0x8f7a8b79, 0x867aff7a,
- 0x001f8000, 0xb97af807,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e8378, 0xb96ee0c2,
- 0xbf800002, 0xb9780002,
- 0xbe801f6c, 0x866dff6d,
- 0x0000ffff, 0xbefa0080,
- 0xb97a0283, 0xb8faf807,
- 0x867aff7a, 0x001f8000,
- 0x8e7a8b7a, 0x8979ff79,
- 0xfc000000, 0x87797a79,
- 0xba7ff807, 0x00000000,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x877a8478, 0xb97af802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8fa2985, 0x807a817a,
- 0x8e7a8a7a, 0x8e7a817a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0xbef1007c,
- 0xbef00080, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b3a,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611b7a, 0x0000007c,
+ 0xc0611b3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bba,
+ 0xbefc0070, 0xc0611b7a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bfa, 0x0000007c,
+ 0xc0611bba, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611e3a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8fbf803, 0xbefe007c,
- 0xbefc0070, 0xc0611efa,
+ 0xbefc0070, 0xc0611bfa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a3a, 0x0000007c,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
- 0xb8f1f801, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
+ 0xbefc0070, 0xc0611a3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
- 0x867aff7f, 0x04000000,
- 0xbeef0080, 0x876f6f7a,
- 0xb8f02985, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbeef0080,
+ 0x876f6f7a, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
+ 0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840064,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf840060,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02985,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xd28c0002, 0x000100c1,
+ 0xd28d0003, 0x000204c1,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850030, 0x24040682,
+ 0xd86e4000, 0x00000002,
+ 0xbf8cc07f, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000200, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffd2,
+ 0xbf820015, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87fff7, 0xbef70000,
+ 0xbef000ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840064, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840060, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02985, 0x80708170,
- 0x8e708a70, 0x8e708170,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xd28c0002,
- 0x000100c1, 0xd28d0003,
- 0x000204c1, 0x867aff78,
+ 0xb8fb2b05, 0x807b817b,
+ 0x8e7b827b, 0xbef600ff,
+ 0x01000000, 0xbefc0084,
+ 0xbf0a7b7c, 0xbf84006d,
+ 0xbf11017c, 0x807bff7b,
+ 0x00001000, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850030,
- 0x24040682, 0xd86e4000,
- 0x00000002, 0xbf8cc07f,
+ 0x10000000, 0xbf850051,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -3162,31 +3221,51 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x680404ff, 0x00000200,
- 0xd0c9006a, 0x0000f702,
- 0xbf87ffd2, 0xbf820015,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x701d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000f702, 0xbf87fff7,
- 0xbef70000, 0xbef000ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb2b05,
- 0x807b817b, 0x8e7b827b,
- 0xbef600ff, 0x01000000,
- 0xbefc0084, 0xbf0a7b7c,
- 0xbf84006d, 0xbf11017c,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffef,
+ 0xbf9c0000, 0xb8fb2985,
+ 0x807b817b, 0x8e7b837b,
+ 0xb8fa2b05, 0x807a817a,
+ 0x8e7a827a, 0x80fb7a7b,
+ 0x867b7b7b, 0xbf84007a,
0x807bff7b, 0x00001000,
+ 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850051, 0xbe840080,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3225,201 +3304,137 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffb1,
- 0xbf9c0000, 0xbf820012,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffef, 0xbf9c0000,
- 0xb8fb2985, 0x807b817b,
- 0x8e7b837b, 0xb8fa2b05,
- 0x807a817a, 0x8e7a827a,
- 0x80fb7a7b, 0x867b7b7b,
- 0xbf84007a, 0x807bff7b,
- 0x00001000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf8200ee,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200ee, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2b05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2b05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xb8ef2985, 0x806f816f,
+ 0x8e6f836f, 0xb8f92b05,
+ 0x80798179, 0x8e798279,
+ 0x80ef796f, 0x866f6f6f,
+ 0xbf84001a, 0x806fff6f,
+ 0x00008000, 0xbefc0080,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
+ 0xd3d94000, 0x18000100,
+ 0xd3d94001, 0x18000101,
+ 0xd3d94002, 0x18000102,
+ 0xd3d94003, 0x18000103,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xb8ef2985,
- 0x806f816f, 0x8e6f836f,
- 0xb8f92b05, 0x80798179,
- 0x8e798279, 0x80ef796f,
- 0x866f6f6f, 0xbf84001a,
- 0x806fff6f, 0x00008000,
- 0xbefc0080, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0xd3d94000,
- 0x18000100, 0xd3d94001,
- 0x18000101, 0xd3d94002,
- 0x18000102, 0xd3d94003,
- 0x18000103, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffea,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x80f8c078, 0xb8ef1605,
- 0x806f816f, 0x8e6f846f,
- 0x8e76826f, 0xbef600ff,
- 0x01000000, 0xbefc006f,
- 0xc031003a, 0x00000078,
- 0x80f8c078, 0xbf8cc07f,
- 0x80fc907c, 0xbf800000,
- 0xbe802d00, 0xbe822d02,
- 0xbe842d04, 0xbe862d06,
- 0xbe882d08, 0xbe8a2d0a,
- 0xbe8c2d0c, 0xbe8e2d0e,
- 0xbf06807c, 0xbf84fff0,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xbf85ffea, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+ 0xbefc006f, 0xc031003a,
+ 0x00000078, 0x80f8c078,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2985,
- 0x806e816e, 0x8e6e8a6e,
- 0x8e6e816e, 0xb8ef1605,
- 0x806f816f, 0x8e6f866f,
- 0x806e6f6e, 0x806e746e,
- 0x826f8075, 0x866fff6f,
- 0x0000ffff, 0xc00b1c37,
- 0x00000050, 0xc00b1d37,
- 0x00000060, 0xc0031e77,
- 0x00000074, 0xbf8cc07f,
- 0x8f6e8b79, 0x866eff6e,
- 0x001f8000, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e837a, 0xb96ee0c2,
- 0xbf800002, 0xb97a0002,
- 0xbf8a0000, 0xbe801f6c,
- 0xbf810000, 0x00000000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2985, 0x806e816e,
+ 0x8e6e8a6e, 0x8e6e816e,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f866f, 0x806e6f6e,
+ 0x806e746e, 0x826f8075,
+ 0x866fff6f, 0x0000ffff,
+ 0xc00b1c37, 0x00000050,
+ 0xc00b1d37, 0x00000060,
+ 0xc0031e77, 0x00000074,
+ 0xbf8cc07f, 0x8f6e8b79,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e837a,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb97a0002, 0xbf8a0000,
+ 0xbe801f6c, 0xbf810000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 8b92c33c2a7c..fdab64624422 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -276,6 +276,11 @@ L_FETCH_2ND_TRAP:
#endif
s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+ s_bitcmp1_b32 ttmp15, 0xF
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+L_NO_SIGN_EXTEND_TMA:
+
s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
s_waitcnt lgkmcnt(0)
s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index f2087cc2e89d..e506411ad28a 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -283,6 +283,11 @@ L_FETCH_2ND_TRAP:
s_getreg_b32 ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI)
s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+ s_bitcmp1_b32 ttmp15, 0xF
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+L_NO_SIGN_EXTEND_TMA:
+
s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 glc:1 // debug trap enabled flag
s_waitcnt lgkmcnt(0)
s_lshl_b32 ttmp2, ttmp2, TTMP_DEBUG_TRAP_ENABLED_SHIFT
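The two trap-handler hunks above add the same guard: after the 64-bit TMA base is shifted left by 8, bit 15 of ttmp15 (bit 47 of the reconstructed virtual address) is tested and, if set, the upper 16 bits of ttmp15 are filled with ones so the second-level trap handler address stays canonical. A minimal C sketch of the equivalent sign extension, assuming ttmp14/ttmp15 hold the low/high dwords after the shift (the helper name is hypothetical):

        static inline uint64_t tma_sign_extend(uint32_t ttmp14, uint32_t ttmp15)
        {
                /* bit 15 of the high dword is bit 47 of the 48-bit virtual address */
                if (ttmp15 & (1u << 15))
                        ttmp15 |= 0xFFFF0000u;  /* fill bits 63:48 to keep the address canonical */
                return ((uint64_t)ttmp15 << 32) | ttmp14;
        }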
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1b54a9aaae70..c37f1fcd2165 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -44,6 +44,7 @@
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
+#include "kfd_debug.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
@@ -142,15 +143,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
return -EPERM;
}
- process = kfd_create_process(filep);
+ process = kfd_create_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
- if (kfd_is_locked()) {
- dev_dbg(kfd_device, "kfd is locked!\n"
- "process %d unreferenced", process->pasid);
+ if (kfd_process_init_cwsr_apu(process, filep)) {
kfd_unref_process(process);
- return -EAGAIN;
+ return -EFAULT;
}
/* filep now owns the reference returned by kfd_create_process */
@@ -186,7 +185,12 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args)
{
- if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ /*
+ * Repurpose queue percentage to accommodate new features:
+ * bits 0-7: queue percentage
+ * bits 8-15: pm4_target_xcc
+ */
+ if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
@@ -236,7 +240,9 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->is_interop = false;
q_properties->is_gws = false;
- q_properties->queue_percent = args->queue_percentage;
+ q_properties->queue_percent = args->queue_percentage & 0xFF;
+ /* bits 8-15 are repurposed to be PM4 target XCC */
+ q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
q_properties->queue_size = args->ring_size;
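Because queue_percentage now carries two fields, a caller has to pack them before issuing the create/update ioctl. A small user-mode sketch of the packing implied by the comment above (the helper name is hypothetical, not part of the patch):

        /* Hypothetical helper: pack the repurposed queue_percentage field
         * (bits 0-7: queue percentage, bits 8-15: PM4 target XCC). */
        static inline uint32_t pack_queue_percentage(uint32_t percent, uint32_t xcc)
        {
                return (percent & 0xFF) | ((xcc & 0xFF) << 8);
        }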
@@ -293,7 +299,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_create_queue_args *args = data;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int err = 0;
unsigned int queue_id;
struct kfd_process_device *pdd;
@@ -327,16 +333,18 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
- err = -ENOMEM;
- goto err_alloc_doorbells;
+ if (!pdd->qpd.proc_doorbells) {
+ err = kfd_alloc_process_doorbells(dev->kfd, pdd);
+ if (err) {
+ pr_debug("failed to allocate process doorbells\n");
+ goto err_bind_process;
+ }
}
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
- if (dev->shared_resources.enable_mes &&
+ if (dev->kfd->shared_resources.enable_mes &&
((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
>> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
struct amdgpu_bo_va_mapping *wptr_mapping;
@@ -404,13 +412,13 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pr_debug("Write ptr address == 0x%016llX\n",
args->write_pointer_address);
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
return 0;
err_create_queue:
if (wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
-err_alloc_doorbells:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
@@ -442,7 +450,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
- if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ /*
+ * Repurpose queue percentage to accommodate new features:
+ * bits 0-7: queue percentage
+ * bits 8-15: pm4_target_xcc
+ */
+ if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
@@ -466,7 +479,9 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
- properties.queue_percent = args->queue_percentage;
+ properties.queue_percent = args->queue_percentage & 0xFF;
+ /* bits 8-15 are repurposed to be PM4 target XCC */
+ properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
properties.priority = args->queue_priority;
pr_debug("Updating queue id %d for pasid 0x%x\n",
@@ -524,8 +539,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
goto out;
}
- minfo.update_flag = UPDATE_FLAG_CU_MASK;
-
mutex_lock(&p->mutex);
retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
@@ -887,7 +900,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
{
struct kfd_ioctl_set_scratch_backing_va_args *args = data;
struct kfd_process_device *pdd;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
long err;
mutex_lock(&p->mutex);
@@ -1006,19 +1019,23 @@ err_drm_file:
return ret;
}
-bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
if (debug_largebar) {
pr_debug("Simulate large-bar allocation on non large-bar machine\n");
return true;
}
- if (dev->use_iommu_v2)
- return false;
-
if (dev->local_mem_info.local_mem_size_private == 0 &&
- dev->local_mem_info.local_mem_size_public > 0)
+ dev->local_mem_info.local_mem_size_public > 0)
return true;
+
+ if (dev->local_mem_info.local_mem_size_public == 0 &&
+ dev->kfd->adev->gmc.is_app_apu) {
+ pr_debug("APP APU, Consider like a large bar system\n");
+ return true;
+ }
+
return false;
}
@@ -1030,7 +1047,8 @@ static int kfd_ioctl_get_available_memory(struct file *filep,
if (!pdd)
return -EINVAL;
- args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev);
+ args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
+ pdd->dev->node_id);
kfd_unlock_pdd(pdd);
return 0;
}
@@ -1041,7 +1059,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
struct kfd_process_device *pdd;
void *mem;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int idr_handle;
long err;
uint64_t offset = args->mmap_offset;
@@ -1105,7 +1123,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
- if (args->size != kfd_doorbell_process_slice(dev)) {
+ if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
err = -EINVAL;
goto err_unlock;
}
@@ -1231,7 +1249,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
struct kfd_ioctl_map_memory_to_gpu_args *args = data;
struct kfd_process_device *pdd, *peer_pdd;
void *mem;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
long err = 0;
int i;
uint32_t *devices_arr = NULL;
@@ -1405,7 +1423,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
args->n_success = i+1;
}
- flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+ flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
@@ -1445,7 +1463,7 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep,
int retval;
struct kfd_ioctl_alloc_queue_gws_args *args = data;
struct queue *q;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
mutex_lock(&p->mutex);
q = pqm_get_user_queue(&p->pqm, args->queue_id);
@@ -1467,6 +1485,12 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep,
goto out_unlock;
}
+ if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
+ kfd_dbg_has_cwsr_workaround(dev))) {
+ retval = -EBUSY;
+ goto out_unlock;
+ }
+
retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
mutex_unlock(&p->mutex);
@@ -1482,10 +1506,11 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_dmabuf_info_args *args = data;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct amdgpu_device *dmabuf_adev;
void *metadata_buffer = NULL;
uint32_t flags;
+ int8_t xcp_id;
unsigned int i;
int r;
@@ -1506,17 +1531,14 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
&dmabuf_adev, &args->size,
metadata_buffer, args->metadata_size,
- &args->metadata_size, &flags);
+ &args->metadata_size, &flags, &xcp_id);
if (r)
goto exit;
- /* Reverse-lookup gpu_id from kgd pointer */
- dev = kfd_device_by_adev(dmabuf_adev);
- if (!dev) {
- r = -EINVAL;
- goto exit;
- }
- args->gpu_id = dev->id;
+ if (xcp_id >= 0)
+ args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
+ else
+ args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
@@ -1596,7 +1618,7 @@ static int kfd_ioctl_export_dmabuf(struct file *filep,
struct kfd_ioctl_export_dmabuf_args *args = data;
struct kfd_process_device *pdd;
struct dma_buf *dmabuf;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
void *mem;
int ret = 0;
@@ -1822,22 +1844,21 @@ static uint32_t get_process_num_bos(struct kfd_process *p)
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
- if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
+ if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
num_of_bos++;
}
}
return num_of_bos;
}
-static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
+static int criu_get_prime_handle(struct kgd_mem *mem, int flags,
u32 *shared_fd)
{
struct dma_buf *dmabuf;
int ret;
- dmabuf = amdgpu_gem_prime_export(gobj, flags);
- if (IS_ERR(dmabuf)) {
- ret = PTR_ERR(dmabuf);
+ ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
+ if (ret) {
pr_err("dmabuf export failed for the BO\n");
return ret;
}
@@ -1895,7 +1916,11 @@ static int criu_checkpoint_bos(struct kfd_process *p,
kgd_mem = (struct kgd_mem *)mem;
dumper_bo = kgd_mem->bo;
- if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
+ /* Skip checkpointing BOs that are used for Trap handler
+ * code and state. Currently, these BOs have a VA that
+ * is less than the GPUVM base.
+ */
+ if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
continue;
bo_bucket = &bo_buckets[bo_index];
@@ -1917,7 +1942,7 @@ static int criu_checkpoint_bos(struct kfd_process *p,
}
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
- ret = criu_get_prime_handle(&dumper_bo->tbo.base,
+ ret = criu_get_prime_handle(kgd_mem,
bo_bucket->alloc_flags &
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
&bo_bucket->dmabuf_fd);
@@ -2178,7 +2203,7 @@ static int criu_restore_devices(struct kfd_process *p,
}
for (i = 0; i < args->num_devices; i++) {
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct kfd_process_device *pdd;
struct file *drm_file;
@@ -2239,10 +2264,10 @@ static int criu_restore_devices(struct kfd_process *p,
goto exit;
}
- if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
- ret = -ENOMEM;
- goto exit;
+ if (!pdd->qpd.proc_doorbells) {
+ ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
+ if (ret)
+ goto exit;
}
}
@@ -2268,7 +2293,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
u64 offset;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
- if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev))
+ if (bo_bucket->size !=
+ kfd_doorbell_process_slice(pdd->dev->kfd))
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
@@ -2350,7 +2376,7 @@ static int criu_restore_bo(struct kfd_process *p,
/* now map these BOs to GPU/s */
for (j = 0; j < p->n_pdds; j++) {
- struct kfd_dev *peer;
+ struct kfd_node *peer;
struct kfd_process_device *peer_pdd;
if (!bo_priv->mapped_gpuids[j])
@@ -2378,7 +2404,7 @@ static int criu_restore_bo(struct kfd_process *p,
/* create the dmabuf object and export the bo */
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
- ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base, DRM_RDWR,
+ ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
&bo_bucket->dmabuf_fd);
if (ret)
return ret;
@@ -2715,6 +2741,367 @@ static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
return ret;
}
+static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
+ bool enable_ttmp_setup)
+{
+ int i = 0, ret = 0;
+
+ if (p->is_runtime_retry)
+ goto retry;
+
+ if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+ return -EBUSY;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (pdd->qpd.queue_count)
+ return -EEXIST;
+
+ /*
+ * Set up TTMPs by default.
+ * Note that this call must remain here so that MES ADD QUEUE can
+ * skip_process_ctx_clear unconditionally, because the first call to
+ * SET_SHADER_DEBUGGER clears any stale process context data
+ * saved in MES.
+ */
+ if (pdd->dev->kfd->shared_resources.enable_mes)
+ kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
+ }
+
+ p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+ p->runtime_info.r_debug = r_debug;
+ p->runtime_info.ttmp_setup = enable_ttmp_setup;
+
+ if (p->runtime_info.ttmp_setup) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->dev->kfd2kgd->enable_debug_trap(
+ pdd->dev->adev,
+ true,
+ pdd->dev->vm_info.last_vmid_kfd);
+ } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
+ pdd->dev->adev,
+ false,
+ 0);
+ }
+ }
+ }
+
+retry:
+ if (p->debug_trap_enabled) {
+ if (!p->is_runtime_retry) {
+ kfd_dbg_trap_activate(p);
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
+ p, NULL, 0, false, NULL, 0);
+ }
+
+ mutex_unlock(&p->mutex);
+ ret = down_interruptible(&p->runtime_enable_sema);
+ mutex_lock(&p->mutex);
+
+ p->is_runtime_retry = !!ret;
+ }
+
+ return ret;
+}
+
+static int runtime_disable(struct kfd_process *p)
+{
+ int i = 0, ret;
+ bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
+
+ p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
+ p->runtime_info.r_debug = 0;
+
+ if (p->debug_trap_enabled) {
+ if (was_enabled)
+ kfd_dbg_trap_deactivate(p, false, 0);
+
+ if (!p->is_runtime_retry)
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
+ p, NULL, 0, false, NULL, 0);
+
+ mutex_unlock(&p->mutex);
+ ret = down_interruptible(&p->runtime_enable_sema);
+ mutex_lock(&p->mutex);
+
+ p->is_runtime_retry = !!ret;
+ if (ret)
+ return ret;
+ }
+
+ if (was_enabled && p->runtime_info.ttmp_setup) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+ }
+ }
+
+ p->runtime_info.ttmp_setup = false;
+
+ /* disable ttmp setup */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+ pdd->spi_dbg_override =
+ pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ false,
+ pdd->dev->vm_info.last_vmid_kfd);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ debug_refresh_runlist(pdd->dev->dqm);
+ else
+ kfd_dbg_set_mes_debug_mode(pdd,
+ !kfd_dbg_has_cwsr_workaround(pdd->dev));
+ }
+ }
+
+ return 0;
+}
+
+static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_runtime_enable_args *args = data;
+ int r;
+
+ mutex_lock(&p->mutex);
+
+ if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
+ r = runtime_enable(p, args->r_debug,
+ !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
+ else
+ r = runtime_disable(p);
+
+ mutex_unlock(&p->mutex);
+
+ return r;
+}
+
+static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_dbg_trap_args *args = data;
+ struct task_struct *thread = NULL;
+ struct mm_struct *mm = NULL;
+ struct pid *pid = NULL;
+ struct kfd_process *target = NULL;
+ struct kfd_process_device *pdd = NULL;
+ int r = 0;
+
+ if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Debugging does not support sched_policy %i", sched_policy);
+ return -EINVAL;
+ }
+
+ pid = find_get_pid(args->pid);
+ if (!pid) {
+ pr_debug("Cannot find pid info for %i\n", args->pid);
+ r = -ESRCH;
+ goto out;
+ }
+
+ thread = get_pid_task(pid, PIDTYPE_PID);
+ if (!thread) {
+ r = -ESRCH;
+ goto out;
+ }
+
+ mm = get_task_mm(thread);
+ if (!mm) {
+ r = -ESRCH;
+ goto out;
+ }
+
+ if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
+ bool create_process;
+
+ rcu_read_lock();
+ create_process = thread && thread != current && ptrace_parent(thread) == current;
+ rcu_read_unlock();
+
+ target = create_process ? kfd_create_process(thread) :
+ kfd_lookup_process_by_pid(pid);
+ } else {
+ target = kfd_lookup_process_by_pid(pid);
+ }
+
+ if (IS_ERR_OR_NULL(target)) {
+ pr_debug("Cannot find process PID %i to debug\n", args->pid);
+ r = target ? PTR_ERR(target) : -ESRCH;
+ goto out;
+ }
+
+ /* Check if target is still PTRACED. */
+ rcu_read_lock();
+ if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
+ && ptrace_parent(target->lead_thread) != current) {
+ pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
+ r = -EPERM;
+ }
+ rcu_read_unlock();
+
+ if (r)
+ goto out;
+
+ mutex_lock(&target->mutex);
+
+ if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
+ pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
+ r = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
+ (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
+ args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
+ args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
+ args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
+ args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
+ args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
+ args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
+ r = -EPERM;
+ goto unlock_out;
+ }
+
+ if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
+ args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
+ int user_gpu_id = kfd_process_get_user_gpu_id(target,
+ args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
+ args->set_node_address_watch.gpu_id :
+ args->clear_node_address_watch.gpu_id);
+
+ pdd = kfd_process_device_data_by_id(target, user_gpu_id);
+ if (user_gpu_id == -EINVAL || !pdd) {
+ r = -ENODEV;
+ goto unlock_out;
+ }
+ }
+
+ switch (args->op) {
+ case KFD_IOC_DBG_TRAP_ENABLE:
+ if (target != p)
+ target->debugger_process = p;
+
+ r = kfd_dbg_trap_enable(target,
+ args->enable.dbg_fd,
+ (void __user *)args->enable.rinfo_ptr,
+ &args->enable.rinfo_size);
+ if (!r)
+ target->exception_enable_mask = args->enable.exception_mask;
+
+ break;
+ case KFD_IOC_DBG_TRAP_DISABLE:
+ r = kfd_dbg_trap_disable(target);
+ break;
+ case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
+ r = kfd_dbg_send_exception_to_runtime(target,
+ args->send_runtime_event.gpu_id,
+ args->send_runtime_event.queue_id,
+ args->send_runtime_event.exception_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
+ kfd_dbg_set_enabled_debug_exception_mask(target,
+ args->set_exceptions_enabled.exception_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
+ r = kfd_dbg_trap_set_wave_launch_override(target,
+ args->launch_override.override_mode,
+ args->launch_override.enable_mask,
+ args->launch_override.support_request_mask,
+ &args->launch_override.enable_mask,
+ &args->launch_override.support_request_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
+ r = kfd_dbg_trap_set_wave_launch_mode(target,
+ args->launch_mode.launch_mode);
+ break;
+ case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
+ r = suspend_queues(target,
+ args->suspend_queues.num_queues,
+ args->suspend_queues.grace_period,
+ args->suspend_queues.exception_mask,
+ (uint32_t *)args->suspend_queues.queue_array_ptr);
+
+ break;
+ case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
+ r = resume_queues(target, args->resume_queues.num_queues,
+ (uint32_t *)args->resume_queues.queue_array_ptr);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
+ r = kfd_dbg_trap_set_dev_address_watch(pdd,
+ args->set_node_address_watch.address,
+ args->set_node_address_watch.mask,
+ &args->set_node_address_watch.id,
+ args->set_node_address_watch.mode);
+ break;
+ case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
+ r = kfd_dbg_trap_clear_dev_address_watch(pdd,
+ args->clear_node_address_watch.id);
+ break;
+ case KFD_IOC_DBG_TRAP_SET_FLAGS:
+ r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
+ break;
+ case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
+ r = kfd_dbg_ev_query_debug_event(target,
+ &args->query_debug_event.queue_id,
+ &args->query_debug_event.gpu_id,
+ args->query_debug_event.exception_mask,
+ &args->query_debug_event.exception_mask);
+ break;
+ case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
+ r = kfd_dbg_trap_query_exception_info(target,
+ args->query_exception_info.source_id,
+ args->query_exception_info.exception_code,
+ args->query_exception_info.clear_exception,
+ (void __user *)args->query_exception_info.info_ptr,
+ &args->query_exception_info.info_size);
+ break;
+ case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
+ r = pqm_get_queue_snapshot(&target->pqm,
+ args->queue_snapshot.exception_mask,
+ (void __user *)args->queue_snapshot.snapshot_buf_ptr,
+ &args->queue_snapshot.num_queues,
+ &args->queue_snapshot.entry_size);
+ break;
+ case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
+ r = kfd_dbg_trap_device_snapshot(target,
+ args->device_snapshot.exception_mask,
+ (void __user *)args->device_snapshot.snapshot_buf_ptr,
+ &args->device_snapshot.num_devices,
+ &args->device_snapshot.entry_size);
+ break;
+ default:
+ pr_err("Invalid option: %i\n", args->op);
+ r = -EINVAL;
+ }
+
+unlock_out:
+ mutex_unlock(&target->mutex);
+
+out:
+ if (thread)
+ put_task_struct(thread);
+
+ if (mm)
+ mmput(mm);
+
+ if (pid)
+ put_pid(pid);
+
+ if (target)
+ kfd_unref_process(target);
+
+ return r;
+}
+
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -2827,6 +3214,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
kfd_ioctl_export_dmabuf, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
+ kfd_ioctl_runtime_enable, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
+ kfd_ioctl_set_debug_trap, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -2947,7 +3340,7 @@ err_i1:
return retcode;
}
-static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
+static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
@@ -2981,7 +3374,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
unsigned long mmap_offset;
unsigned int gpu_id;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 475e47027354..86fb7ac7982a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -26,7 +26,6 @@
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
-#include "kfd_iommu.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -1166,7 +1165,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
props->weight = 20;
else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
- props->weight = 15 * iolink->num_hops_xgmi;
+ props->weight = iolink->weight_xgmi;
else
props->weight = node_distance(id_from, id_to);
@@ -1405,7 +1404,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
return i;
}
-int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info)
+int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
@@ -1524,7 +1523,7 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
num_of_cache_types =
- kfd_fill_gpu_cache_info_from_gfx_config(kdev, *pcache_info);
+ kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
break;
default:
*pcache_info = dummy_cache_info;
@@ -1536,76 +1535,6 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca
return num_of_cache_types;
}
-static bool kfd_ignore_crat(void)
-{
- bool ret;
-
- if (ignore_crat)
- return true;
-
-#ifndef KFD_SUPPORT_IOMMU_V2
- ret = true;
-#else
- ret = false;
-#endif
-
- return ret;
-}
-
-/*
- * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
- * copies CRAT from ACPI (if available).
- * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
- *
- * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
- * crat_image will be NULL
- * @size: [OUT] size of crat_image
- *
- * Return 0 if successful else return error code
- */
-int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
-{
- struct acpi_table_header *crat_table;
- acpi_status status;
- void *pcrat_image;
- int rc = 0;
-
- if (!crat_image)
- return -EINVAL;
-
- *crat_image = NULL;
-
- if (kfd_ignore_crat()) {
- pr_info("CRAT table disabled by module option\n");
- return -ENODATA;
- }
-
- /* Fetch the CRAT table from ACPI */
- status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
- if (status == AE_NOT_FOUND) {
- pr_info("CRAT table not found\n");
- return -ENODATA;
- } else if (ACPI_FAILURE(status)) {
- const char *err = acpi_format_exception(status);
-
- pr_err("CRAT table error: %s\n", err);
- return -EINVAL;
- }
-
- pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
- if (!pcrat_image) {
- rc = -ENOMEM;
- goto out;
- }
-
- memcpy(pcrat_image, crat_table, crat_table->length);
- *crat_image = pcrat_image;
- *size = crat_table->length;
-out:
- acpi_put_table(crat_table);
- return rc;
-}
-
/* Memory required to create Virtual CRAT.
* Since there is no easy way to predict the amount of memory required, the
* following amount is allocated for GPU Virtual CRAT. This is
@@ -1858,7 +1787,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
}
static int kfd_fill_gpu_memory_affinity(int *avail_size,
- struct kfd_dev *kdev, uint8_t type, uint64_t size,
+ struct kfd_node *kdev, uint8_t type, uint64_t size,
struct crat_subtype_memory *sub_type_hdr,
uint32_t proximity_domain,
const struct kfd_local_mem_info *local_mem_info)
@@ -1887,7 +1816,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size,
}
#ifdef CONFIG_ACPI_NUMA
-static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
+static void kfd_find_numa_node_in_srat(struct kfd_node *kdev)
{
struct acpi_table_header *table_header = NULL;
struct acpi_subtable_header *sub_header = NULL;
@@ -1972,6 +1901,9 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
}
#endif
+#define KFD_CRAT_INTRA_SOCKET_WEIGHT 13
+#define KFD_CRAT_XGMI_WEIGHT 15
+
/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
* to its NUMA node
* @avail_size: Available size in the memory
@@ -1982,7 +1914,7 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
* Return 0 if successful else return -ve value
*/
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
- struct kfd_dev *kdev,
+ struct kfd_node *kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain)
{
@@ -2002,7 +1934,16 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
/* Fill in IOLINK subtype.
* TODO: Fill-in other fields of iolink subtype
*/
- if (kdev->adev->gmc.xgmi.connected_to_cpu) {
+ if (kdev->adev->gmc.xgmi.connected_to_cpu ||
+ (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 3) &&
+ kdev->adev->smuio.funcs->get_pkg_type(kdev->adev) ==
+ AMDGPU_PKG_TYPE_APU)) {
+ bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3);
+ int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT :
+ KFD_CRAT_INTRA_SOCKET_WEIGHT;
+ uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
+ kdev->adev, NULL, true) : mem_bw;
+
/*
* with host gpu xgmi link, host can access gpu memory whether
* or not pcie bar type is large, so always create bidirectional
@@ -2010,14 +1951,9 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
*/
sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
- sub_type_hdr->num_hops_xgmi = 1;
- if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
- sub_type_hdr->minimum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
- kdev->adev, NULL, true);
- sub_type_hdr->maximum_bandwidth_mbs =
- sub_type_hdr->minimum_bandwidth_mbs;
- }
+ sub_type_hdr->weight_xgmi = weight;
+ sub_type_hdr->minimum_bandwidth_mbs = bandwidth;
+ sub_type_hdr->maximum_bandwidth_mbs = bandwidth;
} else {
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
sub_type_hdr->minimum_bandwidth_mbs =
@@ -2029,7 +1965,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_ACPI_NUMA
- if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE)
+ if (kdev->adev->pdev->dev.numa_node == NUMA_NO_NODE &&
+ num_possible_nodes() > 1)
kfd_find_numa_node_in_srat(kdev);
#endif
#ifdef CONFIG_NUMA
@@ -2044,12 +1981,14 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
}
static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
- struct kfd_dev *kdev,
- struct kfd_dev *peer_kdev,
+ struct kfd_node *kdev,
+ struct kfd_node *peer_kdev,
struct crat_subtype_iolink *sub_type_hdr,
uint32_t proximity_domain_from,
uint32_t proximity_domain_to)
{
+ bool use_ta_info = kdev->kfd->num_nodes == 1;
+
*avail_size -= sizeof(struct crat_subtype_iolink);
if (*avail_size < 0)
return -ENOMEM;
@@ -2064,12 +2003,25 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
sub_type_hdr->proximity_domain_from = proximity_domain_from;
sub_type_hdr->proximity_domain_to = proximity_domain_to;
- sub_type_hdr->num_hops_xgmi =
- amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
- sub_type_hdr->maximum_bandwidth_mbs =
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
- sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+
+ if (use_ta_info) {
+ sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT *
+ amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
+ sub_type_hdr->maximum_bandwidth_mbs =
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev,
+ peer_kdev->adev, false);
+ sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
+ amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
+ } else {
+ bool is_single_hop = kdev->kfd == peer_kdev->kfd;
+ int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
+ (2 * KFD_CRAT_INTRA_SOCKET_WEIGHT) + KFD_CRAT_XGMI_WEIGHT;
+ int mem_bw = 819200;
+
+ sub_type_hdr->weight_xgmi = weight;
+ sub_type_hdr->maximum_bandwidth_mbs = is_single_hop ? mem_bw : 0;
+ sub_type_hdr->minimum_bandwidth_mbs = is_single_hop ? mem_bw : 0;
+ }
return 0;
}
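For example, with the constants defined above (KFD_CRAT_INTRA_SOCKET_WEIGHT = 13, KFD_CRAT_XGMI_WEIGHT = 15): when TA info is not used (more than one KFD node per device), a single-hop link between two partitions of the same socket reports weight 13 with the fixed 819200 MB/s memory bandwidth, while a link between partitions on different sockets reports 2 * 13 + 15 = 41 and leaves both bandwidth fields at 0.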
@@ -2081,7 +2033,7 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
* [OUT] actual size of data filled in crat_image
*/
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
- size_t *size, struct kfd_dev *kdev,
+ size_t *size, struct kfd_node *kdev,
uint32_t proximity_domain)
{
struct crat_header *crat_table = (struct crat_header *)pcrat_image;
@@ -2150,12 +2102,6 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
cu->hsa_capability = 0;
- /* Check if this node supports IOMMU. During parsing this flag will
- * translate to HSA_CAP_ATS_PRESENT
- */
- if (!kfd_iommu_check_device(kdev))
- cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
-
crat_table->length += sub_type_hdr->length;
crat_table->total_entries++;
@@ -2216,12 +2162,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* (from other GPU to this GPU) will be added
* in kfd_parse_subtype_iolink.
*/
- if (kdev->hive_id) {
+ if (kdev->kfd->hive_id) {
for (nid = 0; nid < proximity_domain; ++nid) {
peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
if (!peer_dev->gpu)
continue;
- if (peer_dev->gpu->hive_id != kdev->hive_id)
+ if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
continue;
sub_type_hdr = (typeof(sub_type_hdr))(
(char *)sub_type_hdr +
@@ -2255,12 +2201,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
* -- this option is not currently implemented.
* The assumption is that all AMD APUs will have CRAT
- * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
+ * @kdev: Valid kfd_node required if flags contain COMPUTE_UNIT_GPU
*
* Return 0 if successful else return -ve value
*/
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
- int flags, struct kfd_dev *kdev,
+ int flags, struct kfd_node *kdev,
uint32_t proximity_domain)
{
void *pcrat_image = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index 8d1e8ba58dee..387a8ef49385 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -275,7 +275,7 @@ struct crat_subtype_iolink {
uint32_t maximum_bandwidth_mbs;
uint32_t recommended_transfer_size;
uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH - 1];
- uint8_t num_hops_xgmi;
+ uint8_t weight_xgmi;
};
/*
@@ -293,7 +293,7 @@ struct crat_subtype_generic {
#pragma pack()
-struct kfd_dev;
+struct kfd_node;
/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
@@ -305,14 +305,13 @@ struct kfd_gpu_cache_info {
*/
uint32_t num_cu_shared;
};
-int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info);
+int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info);
-int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
void kfd_destroy_crat_image(void *crat_image);
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
uint32_t proximity_domain);
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
- int flags, struct kfd_dev *kdev,
+ int flags, struct kfd_node *kdev,
uint32_t proximity_domain);
#endif /* KFD_CRAT_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
new file mode 100644
index 000000000000..9ec750666382
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -0,0 +1,1120 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_debug.h"
+#include "kfd_device_queue_manager.h"
+#include "kfd_topology.h"
+#include <linux/file.h>
+#include <uapi/linux/kfd_ioctl.h>
+
+#define MAX_WATCH_ADDRESSES 4
+
+int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
+ unsigned int *queue_id,
+ unsigned int *gpu_id,
+ uint64_t exception_clear_mask,
+ uint64_t *event_status)
+{
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ int i;
+
+ if (!(process && process->debug_trap_enabled))
+ return -ENODATA;
+
+ mutex_lock(&process->event_mutex);
+ *event_status = 0;
+ *queue_id = 0;
+ *gpu_id = 0;
+
+ /* find and report queue events */
+ pqm = &process->pqm;
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ uint64_t tmp = process->exception_enable_mask;
+
+ if (!pqn->q)
+ continue;
+
+ tmp &= pqn->q->properties.exception_status;
+
+ if (!tmp)
+ continue;
+
+ *event_status = pqn->q->properties.exception_status;
+ *queue_id = pqn->q->properties.queue_id;
+ *gpu_id = pqn->q->device->id;
+ pqn->q->properties.exception_status &= ~exception_clear_mask;
+ goto out;
+ }
+
+ /* find and report device events */
+ for (i = 0; i < process->n_pdds; i++) {
+ struct kfd_process_device *pdd = process->pdds[i];
+ uint64_t tmp = process->exception_enable_mask
+ & pdd->exception_status;
+
+ if (!tmp)
+ continue;
+
+ *event_status = pdd->exception_status;
+ *gpu_id = pdd->dev->id;
+ pdd->exception_status &= ~exception_clear_mask;
+ goto out;
+ }
+
+ /* report process events */
+ if (process->exception_enable_mask & process->exception_status) {
+ *event_status = process->exception_status;
+ process->exception_status &= ~exception_clear_mask;
+ }
+
+out:
+ mutex_unlock(&process->event_mutex);
+ return *event_status ? 0 : -EAGAIN;
+}
+
+void debug_event_write_work_handler(struct work_struct *work)
+{
+ struct kfd_process *process;
+
+ static const char write_data = '.';
+ loff_t pos = 0;
+
+ process = container_of(work,
+ struct kfd_process,
+ debug_event_workarea);
+
+ kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
+}
+
+/* update process/device/queue exception status, write to descriptor
+ * only if exception_status is enabled.
+ */
+bool kfd_dbg_ev_raise(uint64_t event_mask,
+ struct kfd_process *process, struct kfd_node *dev,
+ unsigned int source_id, bool use_worker,
+ void *exception_data, size_t exception_data_size)
+{
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ int i;
+ static const char write_data = '.';
+ loff_t pos = 0;
+ bool is_subscribed = true;
+
+ if (!(process && process->debug_trap_enabled))
+ return false;
+
+ mutex_lock(&process->event_mutex);
+
+ if (event_mask & KFD_EC_MASK_DEVICE) {
+ for (i = 0; i < process->n_pdds; i++) {
+ struct kfd_process_device *pdd = process->pdds[i];
+
+ if (pdd->dev != dev)
+ continue;
+
+ pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;
+
+ if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
+ if (!pdd->vm_fault_exc_data) {
+ pdd->vm_fault_exc_data = kmemdup(
+ exception_data,
+ exception_data_size,
+ GFP_KERNEL);
+ if (!pdd->vm_fault_exc_data)
+ pr_debug("Failed to allocate exception data memory");
+ } else {
+ pr_debug("Debugger exception data not saved\n");
+ print_hex_dump_bytes("exception data: ",
+ DUMP_PREFIX_OFFSET,
+ exception_data,
+ exception_data_size);
+ }
+ }
+ break;
+ }
+ } else if (event_mask & KFD_EC_MASK_PROCESS) {
+ process->exception_status |= event_mask & KFD_EC_MASK_PROCESS;
+ } else {
+ pqm = &process->pqm;
+ list_for_each_entry(pqn, &pqm->queues,
+ process_queue_list) {
+ int target_id;
+
+ if (!pqn->q)
+ continue;
+
+ target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ?
+ pqn->q->properties.queue_id :
+ pqn->q->doorbell_id;
+
+ if (pqn->q->device != dev || target_id != source_id)
+ continue;
+
+ pqn->q->properties.exception_status |= event_mask;
+ break;
+ }
+ }
+
+ if (process->exception_enable_mask & event_mask) {
+ if (use_worker)
+ schedule_work(&process->debug_event_workarea);
+ else
+ kernel_write(process->dbg_ev_file,
+ &write_data,
+ 1,
+ &pos);
+ } else {
+ is_subscribed = false;
+ }
+
+ mutex_unlock(&process->event_mutex);
+
+ return is_subscribed;
+}
+
+/* set pending event queue entry from ring entry */
+bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
+ unsigned int pasid,
+ uint32_t doorbell_id,
+ uint64_t trap_mask,
+ void *exception_data,
+ size_t exception_data_size)
+{
+ struct kfd_process *p;
+ bool signaled_to_debugger_or_runtime = false;
+
+ p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return false;
+
+ if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
+ exception_data, exception_data_size)) {
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+
+ if (!!(trap_mask & KFD_EC_MASK_QUEUE) &&
+ p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
+ mutex_lock(&p->mutex);
+
+ pqm = &p->pqm;
+ list_for_each_entry(pqn, &pqm->queues,
+ process_queue_list) {
+
+ if (!(pqn->q && pqn->q->device == dev &&
+ pqn->q->doorbell_id == doorbell_id))
+ continue;
+
+ kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
+ trap_mask);
+
+ signaled_to_debugger_or_runtime = true;
+
+ break;
+ }
+
+ mutex_unlock(&p->mutex);
+ } else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
+ kfd_dqm_evict_pasid(dev->dqm, p->pasid);
+ kfd_signal_vm_fault_event(dev, p->pasid, NULL,
+ exception_data);
+
+ signaled_to_debugger_or_runtime = true;
+ }
+ } else {
+ signaled_to_debugger_or_runtime = true;
+ }
+
+ kfd_unref_process(p);
+
+ return signaled_to_debugger_or_runtime;
+}
+
+int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int dev_id,
+ unsigned int queue_id,
+ uint64_t error_reason)
+{
+ if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
+ struct kfd_process_device *pdd = NULL;
+ struct kfd_hsa_memory_exception_data *data;
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->id == dev_id) {
+ pdd = p->pdds[i];
+ break;
+ }
+ }
+
+ if (!pdd)
+ return -ENODEV;
+
+ data = (struct kfd_hsa_memory_exception_data *)
+ pdd->vm_fault_exc_data;
+
+ kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
+ kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
+ error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
+ }
+
+ if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) {
+ /*
+ * Blocking should only happen after the debugger receives the
+ * runtime enable notice.
+ */
+ up(&p->runtime_enable_sema);
+ error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME);
+ }
+
+ if (error_reason)
+ return kfd_send_exception_to_runtime(p, queue_id, error_reason);
+
+ return 0;
+}
+
+static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
+{
+ struct mqd_update_info minfo = {0};
+ int err;
+
+ if (!q)
+ return 0;
+
+ if (!kfd_dbg_has_cwsr_workaround(q->device))
+ return 0;
+
+ if (enable && q->properties.is_user_cu_masked)
+ return -EBUSY;
+
+ minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE;
+
+ q->properties.is_dbg_wa = enable;
+ err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
+ if (err)
+ q->properties.is_dbg_wa = false;
+
+ return err;
+}
+
+static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable)
+{
+ struct process_queue_manager *pqm = &target->pqm;
+ struct process_queue_node *pqn;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ r = kfd_dbg_set_queue_workaround(pqn->q, enable);
+ if (enable && r)
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list)
+ kfd_dbg_set_queue_workaround(pqn->q, false);
+
+ if (enable)
+ target->runtime_info.runtime_state = r == -EBUSY ?
+ DEBUG_RUNTIME_STATE_ENABLED_BUSY :
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR;
+
+ return r;
+}
+
+int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
+{
+ uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
+ uint32_t flags = pdd->process->dbg_flags;
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ return 0;
+
+ return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
+ pdd->watch_points, flags, sq_trap_en);
+}
+
+#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1
+static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
+{
+ int i;
+
+ *watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;
+
+ spin_lock(&pdd->dev->kfd->watch_points_lock);
+
+ for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
+ /* device watchpoint in use so skip */
+ if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1)
+ continue;
+
+ pdd->alloc_watch_ids |= 0x1 << i;
+ pdd->dev->kfd->alloc_watch_ids |= 0x1 << i;
+ *watch_id = i;
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+ return 0;
+ }
+
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+
+ return -ENOMEM;
+}
+
+static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
+{
+ spin_lock(&pdd->dev->kfd->watch_points_lock);
+
+ /* process owns device watch point so safe to clear */
+ if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
+ pdd->alloc_watch_ids &= ~(0x1 << watch_id);
+ pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id);
+ }
+
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+}
+
+static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
+{
+ bool owns_watch_id = false;
+
+ spin_lock(&pdd->dev->kfd->watch_points_lock);
+ owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
+ ((pdd->alloc_watch_ids >> watch_id) & 0x1);
+
+ spin_unlock(&pdd->dev->kfd->watch_points_lock);
+
+ return owns_watch_id;
+}
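Editorial note (not part of the patch): the three helpers above share a per-device watch-point ID bitmap between the device (kfd->alloc_watch_ids) and the owning process (pdd->alloc_watch_ids). The standalone C sketch below models the same first-free-bit allocation and owner-only release; MAX_WATCH_ADDRESSES is fixed at 4, locking is omitted, and all names are illustrative only.

/* Minimal user-space model of the watch-ID bitmap allocator above. */
#include <stdio.h>

#define MAX_WATCH_ADDRESSES	4
#define INVALID_WATCH_ID	-1

static unsigned int dev_alloc_watch_ids; /* bits set = IDs in use on the device */
static unsigned int pdd_alloc_watch_ids; /* bits set = IDs owned by this process */

static int alloc_watch_id(int *watch_id)
{
	int i;

	*watch_id = INVALID_WATCH_ID;
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		if ((dev_alloc_watch_ids >> i) & 0x1)
			continue;			/* watch point already in use */
		pdd_alloc_watch_ids |= 0x1 << i;
		dev_alloc_watch_ids |= 0x1 << i;
		*watch_id = i;
		return 0;
	}
	return -1;					/* all watch points taken */
}

static void clear_watch_id(int watch_id)
{
	/* only the owning process may release a watch ID */
	if ((pdd_alloc_watch_ids >> watch_id) & 0x1) {
		pdd_alloc_watch_ids &= ~(0x1 << watch_id);
		dev_alloc_watch_ids &= ~(0x1 << watch_id);
	}
}

int main(void)
{
	int id;

	while (!alloc_watch_id(&id))
		printf("allocated watch id %d\n", id);	/* prints 0..3 */
	clear_watch_id(2);
	if (!alloc_watch_id(&id))
		printf("reused watch id %d\n", id);	/* prints 2 */
	return 0;
}

In the real code both bitmaps are updated under the device's watch_points_lock, so the ownership check in the clear path guarantees a process can only free IDs it allocated.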
+
+int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
+ uint32_t watch_id)
+{
+ int r;
+
+ if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
+ return -EINVAL;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes) {
+ r = debug_lock_and_unmap(pdd->dev->dqm);
+ if (r)
+ return r;
+ }
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch(
+ pdd->dev->adev,
+ watch_id);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_map_and_unlock(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd, true);
+
+ kfd_dbg_clear_dev_watch_id(pdd, watch_id);
+
+ return r;
+}
+
+int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t *watch_id,
+ uint32_t watch_mode)
+{
+ int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
+ uint32_t xcc_mask = pdd->dev->xcc_mask;
+
+ if (r)
+ return r;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes) {
+ r = debug_lock_and_unmap(pdd->dev->dqm);
+ if (r) {
+ kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
+ return r;
+ }
+ }
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ for_each_inst(xcc_id, xcc_mask)
+ pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
+ pdd->dev->adev,
+ watch_address,
+ watch_address_mask,
+ *watch_id,
+ watch_mode,
+ pdd->dev->vm_info.last_vmid_kfd,
+ xcc_id);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_map_and_unlock(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd, true);
+
+	/* HWS is broken, so there is no point in a HW rollback, but release the watch point anyway */
+ if (r)
+ kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
+
+ return 0;
+}
+
+static void kfd_dbg_clear_process_address_watch(struct kfd_process *target)
+{
+ int i, j;
+
+ for (i = 0; i < target->n_pdds; i++)
+ for (j = 0; j < MAX_WATCH_ADDRESSES; j++)
+ kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j);
+}
+
+int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
+{
+ uint32_t prev_flags = target->dbg_flags;
+ int i, r = 0, rewind_count = 0;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ if (!kfd_dbg_is_per_vmid_supported(target->pdds[i]->dev) &&
+ (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) {
+ *flags = prev_flags;
+ return -EACCES;
+ }
+ }
+
+ target->dbg_flags = *flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP;
+ *flags = prev_flags;
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ continue;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd, true);
+
+ if (r) {
+ target->dbg_flags = prev_flags;
+ break;
+ }
+
+ rewind_count++;
+ }
+
+ /* Rewind flags */
+ if (r) {
+ target->dbg_flags = prev_flags;
+
+ for (i = 0; i < rewind_count; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
+ continue;
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ debug_refresh_runlist(pdd->dev->dqm);
+ else
+ kfd_dbg_set_mes_debug_mode(pdd, true);
+ }
+ }
+
+ return r;
+}
+
+/* kfd_dbg_trap_deactivate:
+ * target: target process
+ *	unwind: whether this call is unwinding a failed kfd_dbg_trap_enable()
+ *	unwind_count: if unwind is true, how far down the pdd list we need
+ *		to unwind; otherwise ignored
+ */
+void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count)
+{
+ int i;
+
+ if (!unwind) {
+ uint32_t flags = 0;
+ int resume_count = resume_queues(target, 0, NULL);
+
+ if (resume_count)
+ pr_debug("Resumed %d queues\n", resume_count);
+
+ cancel_work_sync(&target->debug_event_workarea);
+ kfd_dbg_clear_process_address_watch(target);
+ kfd_dbg_trap_set_wave_launch_mode(target, 0);
+
+ kfd_dbg_trap_set_flags(target, &flags);
+ }
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ /* If this is an unwind, and we have unwound the required
+		 * enable calls on the pdd list, we need to stop now,
+		 * otherwise we may mess up another debugger session.
+ */
+ if (unwind && i == unwind_count)
+ break;
+
+ kfd_process_set_trap_debug_flag(&pdd->qpd, false);
+
+		/* GFX off was already disabled by debug activate if RLC restore is not supported. */
+ if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->spi_dbg_override =
+ pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ target->runtime_info.ttmp_setup,
+ pdd->dev->vm_info.last_vmid_kfd);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev) &&
+ release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))
+ pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ debug_refresh_runlist(pdd->dev->dqm);
+ else
+ kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
+ }
+
+ kfd_dbg_set_workaround(target, false);
+}
+
+static void kfd_dbg_clean_exception_status(struct kfd_process *target)
+{
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ int i;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ kfd_process_drain_interrupts(pdd);
+
+ pdd->exception_status = 0;
+ }
+
+ pqm = &target->pqm;
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (!pqn->q)
+ continue;
+
+ pqn->q->properties.exception_status = 0;
+ }
+
+ target->exception_status = 0;
+}
+
+int kfd_dbg_trap_disable(struct kfd_process *target)
+{
+ if (!target->debug_trap_enabled)
+ return 0;
+
+ /*
+	 * If runtime is not enabled, defer deactivation to runtime enable;
+	 * otherwise reset the attached running target's runtime state to
+	 * enabled so that it can re-attach.
+ */
+ if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
+ kfd_dbg_trap_deactivate(target, false, 0);
+ else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
+ target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+
+ fput(target->dbg_ev_file);
+ target->dbg_ev_file = NULL;
+
+ if (target->debugger_process) {
+ atomic_dec(&target->debugger_process->debugged_process_count);
+ target->debugger_process = NULL;
+ }
+
+ target->debug_trap_enabled = false;
+ kfd_dbg_clean_exception_status(target);
+ kfd_unref_process(target);
+
+ return 0;
+}
+
+int kfd_dbg_trap_activate(struct kfd_process *target)
+{
+ int i, r = 0;
+
+ r = kfd_dbg_set_workaround(target, true);
+ if (r)
+ return r;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) {
+ r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);
+
+ if (r) {
+ target->runtime_info.runtime_state = (r == -EBUSY) ?
+ DEBUG_RUNTIME_STATE_ENABLED_BUSY :
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR;
+
+ goto unwind_err;
+ }
+ }
+
+		/* Disable GFX OFF to prevent garbage reads/writes to debug registers.
+ * If RLC restore of debug registers is not supported and runtime enable
+ * hasn't done so already on ttmp setup request, restore the trap config registers.
+ *
+ * If RLC restore of debug registers is not supported, keep gfx off disabled for
+ * the debug session.
+ */
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) ||
+ target->runtime_info.ttmp_setup))
+ pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true,
+ pdd->dev->vm_info.last_vmid_kfd);
+
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
+ pdd->dev->adev,
+ false,
+ pdd->dev->vm_info.last_vmid_kfd);
+
+ if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ /*
+ * Setting the debug flag in the trap handler requires that the TMA has been
+ * allocated, which occurs during CWSR initialization.
+		 * If CWSR has not been initialized at this point, the flag will
+		 * be set again during CWSR initialization if the target process
+		 * is still debug enabled.
+ */
+ kfd_process_set_trap_debug_flag(&pdd->qpd, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd, true);
+
+ if (r) {
+ target->runtime_info.runtime_state =
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR;
+ goto unwind_err;
+ }
+ }
+
+ return 0;
+
+unwind_err:
+ /* Enabling debug failed, we need to disable on
+ * all GPUs so the enable is all or nothing.
+ */
+ kfd_dbg_trap_deactivate(target, true, i);
+ return r;
+}
+
+int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
+ void __user *runtime_info, uint32_t *runtime_size)
+{
+ struct file *f;
+ uint32_t copy_size;
+ int i, r = 0;
+
+ if (target->debug_trap_enabled)
+ return -EALREADY;
+
+ /* Enable pre-checks */
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ if (!KFD_IS_SOC15(pdd->dev))
+ return -ENODEV;
+
+ if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) ||
+ kfd_dbg_has_cwsr_workaround(pdd->dev)))
+ return -EBUSY;
+ }
+
+ copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));
+
+ f = fget(fd);
+ if (!f) {
+ pr_err("Failed to get file for (%i)\n", fd);
+ return -EBADF;
+ }
+
+ target->dbg_ev_file = f;
+
+ /* defer activation to runtime if not runtime enabled */
+ if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
+ kfd_dbg_trap_activate(target);
+
+ /* We already hold the process reference but hold another one for the
+ * debug session.
+ */
+ kref_get(&target->ref);
+ target->debug_trap_enabled = true;
+
+ if (target->debugger_process)
+ atomic_inc(&target->debugger_process->debugged_process_count);
+
+ if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) {
+ kfd_dbg_trap_deactivate(target, false, 0);
+ r = -EFAULT;
+ }
+
+ *runtime_size = sizeof(target->runtime_info);
+
+ return r;
+}
+
+static int kfd_dbg_validate_trap_override_request(struct kfd_process *p,
+ uint32_t trap_override,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_supported)
+{
+ int i = 0;
+
+ *trap_mask_supported = 0xffffffff;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ int err = pdd->dev->kfd2kgd->validate_trap_override_request(
+ pdd->dev->adev,
+ trap_override,
+ trap_mask_supported);
+
+ if (err)
+ return err;
+ }
+
+ if (trap_mask_request & ~*trap_mask_supported)
+ return -EACCES;
+
+ return 0;
+}
+
+int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t *trap_mask_supported)
+{
+ int r = 0, i;
+
+ r = kfd_dbg_validate_trap_override_request(target,
+ trap_override,
+ trap_mask_request,
+ trap_mask_supported);
+
+ if (r)
+ return r;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override(
+ pdd->dev->adev,
+ pdd->dev->vm_info.last_vmid_kfd,
+ trap_override,
+ trap_mask_bits,
+ trap_mask_request,
+ trap_mask_prev,
+ pdd->spi_dbg_override);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd, true);
+
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
+ uint8_t wave_launch_mode)
+{
+ int r = 0, i;
+
+ if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL &&
+ wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT &&
+ wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG)
+ return -EINVAL;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
+ pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode(
+ pdd->dev->adev,
+ wave_launch_mode,
+ pdd->dev->vm_info.last_vmid_kfd);
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+
+ if (!pdd->dev->kfd->shared_resources.enable_mes)
+ r = debug_refresh_runlist(pdd->dev->dqm);
+ else
+ r = kfd_dbg_set_mes_debug_mode(pdd, true);
+
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
+ uint32_t source_id,
+ uint32_t exception_code,
+ bool clear_exception,
+ void __user *info,
+ uint32_t *info_size)
+{
+ bool found = false;
+ int r = 0;
+ uint32_t copy_size, actual_info_size = 0;
+ uint64_t *exception_status_ptr = NULL;
+
+ if (!target)
+ return -EINVAL;
+
+ if (!info || !info_size)
+ return -EINVAL;
+
+ mutex_lock(&target->event_mutex);
+
+ if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) {
+ /* Per queue exceptions */
+ struct queue *queue = NULL;
+ int i;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ list_for_each_entry(queue, &qpd->queues_list, list) {
+ if (!found && queue->properties.queue_id == source_id) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ break;
+ }
+
+ if (!found) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) {
+ r = -ENODATA;
+ goto out;
+ }
+ exception_status_ptr = &queue->properties.exception_status;
+ } else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) {
+ /* Per device exceptions */
+ struct kfd_process_device *pdd = NULL;
+ int i;
+
+ for (i = 0; i < target->n_pdds; i++) {
+ pdd = target->pdds[i];
+ if (pdd->dev->id == source_id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ if (exception_code == EC_DEVICE_MEMORY_VIOLATION) {
+ copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size);
+
+ if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) {
+ r = -EFAULT;
+ goto out;
+ }
+ actual_info_size = pdd->vm_fault_exc_data_size;
+ if (clear_exception) {
+ kfree(pdd->vm_fault_exc_data);
+ pdd->vm_fault_exc_data = NULL;
+ pdd->vm_fault_exc_data_size = 0;
+ }
+ }
+ exception_status_ptr = &pdd->exception_status;
+ } else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) {
+ /* Per process exceptions */
+ if (!(target->exception_status & KFD_EC_MASK(exception_code))) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ if (exception_code == EC_PROCESS_RUNTIME) {
+ copy_size = min((size_t)(*info_size), sizeof(target->runtime_info));
+
+ if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ actual_info_size = sizeof(target->runtime_info);
+ }
+
+ exception_status_ptr = &target->exception_status;
+ } else {
+ pr_debug("Bad exception type [%i]\n", exception_code);
+ r = -EINVAL;
+ goto out;
+ }
+
+ *info_size = actual_info_size;
+ if (clear_exception)
+ *exception_status_ptr &= ~KFD_EC_MASK(exception_code);
+out:
+ mutex_unlock(&target->event_mutex);
+ return r;
+}
+
+int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
+ uint64_t exception_clear_mask,
+ void __user *user_info,
+ uint32_t *number_of_device_infos,
+ uint32_t *entry_size)
+{
+ struct kfd_dbg_device_info_entry device_info;
+ uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
+ int i, r = 0;
+
+ if (!(target && user_info && number_of_device_infos && entry_size))
+ return -EINVAL;
+
+ tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
+ *number_of_device_infos = target->n_pdds;
+ *entry_size = min_t(size_t, *entry_size, sizeof(device_info));
+
+ if (!tmp_num_devices)
+ return 0;
+
+ memset(&device_info, 0, sizeof(device_info));
+
+ mutex_lock(&target->event_mutex);
+
+ /* Run over all pdd of the process */
+ for (i = 0; i < tmp_num_devices; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+ struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id);
+
+ device_info.gpu_id = pdd->dev->id;
+ device_info.exception_status = pdd->exception_status;
+ device_info.lds_base = pdd->lds_base;
+ device_info.lds_limit = pdd->lds_limit;
+ device_info.scratch_base = pdd->scratch_base;
+ device_info.scratch_limit = pdd->scratch_limit;
+ device_info.gpuvm_base = pdd->gpuvm_base;
+ device_info.gpuvm_limit = pdd->gpuvm_limit;
+ device_info.location_id = topo_dev->node_props.location_id;
+ device_info.vendor_id = topo_dev->node_props.vendor_id;
+ device_info.device_id = topo_dev->node_props.device_id;
+ device_info.revision_id = pdd->dev->adev->pdev->revision;
+ device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor;
+ device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device;
+ device_info.fw_version = pdd->dev->kfd->mec_fw_version;
+ device_info.gfx_target_version =
+ topo_dev->node_props.gfx_target_version;
+ device_info.simd_count = topo_dev->node_props.simd_count;
+ device_info.max_waves_per_simd =
+ topo_dev->node_props.max_waves_per_simd;
+ device_info.array_count = topo_dev->node_props.array_count;
+ device_info.simd_arrays_per_engine =
+ topo_dev->node_props.simd_arrays_per_engine;
+ device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask);
+ device_info.capability = topo_dev->node_props.capability;
+ device_info.debug_prop = topo_dev->node_props.debug_prop;
+
+ if (exception_clear_mask)
+ pdd->exception_status &= ~exception_clear_mask;
+
+ if (copy_to_user(user_info, &device_info, *entry_size)) {
+ r = -EFAULT;
+ break;
+ }
+
+ user_info += tmp_entry_size;
+ }
+
+ mutex_unlock(&target->event_mutex);
+
+ return r;
+}
+
+void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
+ uint64_t exception_set_mask)
+{
+ uint64_t found_mask = 0;
+ struct process_queue_manager *pqm;
+ struct process_queue_node *pqn;
+ static const char write_data = '.';
+ loff_t pos = 0;
+ int i;
+
+ mutex_lock(&target->event_mutex);
+
+ found_mask |= target->exception_status;
+
+ pqm = &target->pqm;
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (!pqn->q)
+ continue;
+
+ found_mask |= pqn->q->properties.exception_status;
+ }
+
+ for (i = 0; i < target->n_pdds; i++) {
+ struct kfd_process_device *pdd = target->pdds[i];
+
+ found_mask |= pdd->exception_status;
+ }
+
+ if (exception_set_mask & found_mask)
+ kernel_write(target->dbg_ev_file, &write_data, 1, &pos);
+
+ target->exception_enable_mask = exception_set_mask;
+
+ mutex_unlock(&target->event_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
new file mode 100644
index 000000000000..fd0ff64d4184
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef KFD_DEBUG_EVENTS_H_INCLUDED
+#define KFD_DEBUG_EVENTS_H_INCLUDED
+
+#include "kfd_priv.h"
+
+void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count);
+int kfd_dbg_trap_activate(struct kfd_process *target);
+int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
+ unsigned int *queue_id,
+ unsigned int *gpu_id,
+ uint64_t exception_clear_mask,
+ uint64_t *event_status);
+bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
+ unsigned int pasid,
+ uint32_t doorbell_id,
+ uint64_t trap_mask,
+ void *exception_data,
+ size_t exception_data_size);
+bool kfd_dbg_ev_raise(uint64_t event_mask,
+ struct kfd_process *process, struct kfd_node *dev,
+ unsigned int source_id, bool use_worker,
+ void *exception_data,
+ size_t exception_data_size);
+int kfd_dbg_trap_disable(struct kfd_process *target);
+int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
+ void __user *runtime_info,
+ uint32_t *runtime_info_size);
+int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t *trap_mask_supported);
+int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
+ uint8_t wave_launch_mode);
+int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
+ uint32_t watch_id);
+int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t *watch_id,
+ uint32_t watch_mode);
+int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags);
+int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
+ uint32_t source_id,
+ uint32_t exception_code,
+ bool clear_exception,
+ void __user *info,
+ uint32_t *info_size);
+int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int dev_id,
+ unsigned int queue_id,
+ uint64_t error_reason);
+
+static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
+{
+ return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0));
+}
+
+void debug_event_write_work_handler(struct work_struct *work);
+int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
+ uint64_t exception_clear_mask,
+ void __user *user_info,
+ uint32_t *number_of_device_infos,
+ uint32_t *entry_size);
+
+void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
+ uint64_t exception_set_mask);
+/*
+ * If GFX off is enabled, chips that do not support RLC restore for the debug
+ * registers will disable GFX off temporarily for the entire debug session.
+ * See disable_on_trap_action_entry and enable_on_trap_action_exit for details.
+ */
+static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)
+{
+ return !(KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 10) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
+}
+
+static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev)
+{
+ return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3);
+}
+
+static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
+{
+ if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)
+ && dev->kfd->mec2_fw_version < 0x81b6) ||
+ (KFD_GC_VERSION(dev) >= IP_VERSION(9, 1, 0)
+ && KFD_GC_VERSION(dev) <= IP_VERSION(9, 2, 2)
+ && dev->kfd->mec2_fw_version < 0x1b6) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0)
+ && dev->kfd->mec2_fw_version < 0x1b6) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1)
+ && dev->kfd->mec2_fw_version < 0x30) ||
+ (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0)))
+ return false;
+
+ /* Assume debugging and cooperative launch supported otherwise. */
+ return true;
+}
+
+int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en);
+
+static inline bool kfd_dbg_has_ttmps_always_setup(struct kfd_node *dev)
+{
+ return (KFD_GC_VERSION(dev) < IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 2)) ||
+ (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0) &&
+ (dev->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 70);
+}
+#endif
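Editorial note (not part of the patch): the inline predicates above compare packed GC IP versions. A minimal sketch, assuming the usual amdgpu packing of IP_VERSION(major, minor, rev) into ((major << 16) | (minor << 8) | rev), shows how range checks such as the one in kfd_dbg_has_cwsr_workaround() reduce to plain integer comparisons.

/* Sketch only; the IP_VERSION definition below mirrors the assumed amdgpu packing. */
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

int main(void)
{
	unsigned int gc = IP_VERSION(11, 0, 2);

	/* A GC 11.0.x range check becomes two comparisons on the packed value. */
	if (gc >= IP_VERSION(11, 0, 0) && gc <= IP_VERSION(11, 0, 3))
		printf("GC 0x%06x falls in the 11.0.0-11.0.3 workaround range\n", gc);
	return 0;
}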
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index ad5a40a685ac..4a5a0a4e00f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -43,7 +43,7 @@ static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
const char __user *user_buf, size_t size, loff_t *ppos)
{
- struct kfd_dev *dev;
+ struct kfd_node *dev;
char tmp[16];
uint32_t gpu_id;
int ret = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 00f528eb9812..93ce181eb3ba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -29,11 +29,12 @@
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
-#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
+#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
+#include "amdgpu_xcp.h"
#define MQD_SIZE_ALIGNED 768
@@ -42,7 +43,7 @@
* once locked, kfd driver will stop any further GPU execution.
* create process (open) will return -EAGAIN.
*/
-static atomic_t kfd_locked = ATOMIC_INIT(0);
+static int kfd_locked;
#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
@@ -51,6 +52,7 @@ extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
+extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
@@ -59,8 +61,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
-static int kfd_resume_iommu(struct kfd_dev *kfd);
-static int kfd_resume(struct kfd_dev *kfd);
+static int kfd_resume(struct kfd_node *kfd);
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
@@ -81,6 +82,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(4, 2, 0):/* VEGA20 */
case IP_VERSION(4, 2, 2):/* ARCTURUS */
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
+ case IP_VERSION(4, 4, 2):
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
@@ -102,20 +104,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
kfd->device_info.num_sdma_queues_per_engine = 8;
}
+ bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
+
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
- kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
- break;
- case IP_VERSION(6, 0, 1):
- /* Reserve 1 for paging and 1 for gfx */
- kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
- /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
- kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
+ bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
+ kfd->adev->sdma.num_instances *
+ kfd->device_info.num_reserved_sdma_queues_per_engine);
break;
default:
break;
@@ -135,6 +136,12 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(9, 4, 0): /* VEGA20 */
case IP_VERSION(9, 4, 1): /* ARCTURUS */
case IP_VERSION(9, 4, 2): /* ALDEBARAN */
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ break;
+ case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
+ kfd->device_info.event_interrupt_class =
+ &event_interrupt_class_v9_4_3;
+ break;
case IP_VERSION(10, 3, 1): /* VANGOGH */
case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
@@ -148,7 +155,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
- kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
+ kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -185,11 +192,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
kfd_device_info_set_event_interrupt_class(kfd);
- /* Raven */
- if (gc_version == IP_VERSION(9, 1, 0) ||
- gc_version == IP_VERSION(9, 2, 2))
- kfd->device_info.needs_iommu_device = true;
-
if (gc_version < IP_VERSION(11, 0, 0)) {
/* Navi2x+, Navi1x+ */
if (gc_version == IP_VERSION(10, 3, 6))
@@ -224,10 +226,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
asic_type != CHIP_TONGA)
kfd->device_info.supports_cwsr = true;
- if (asic_type == CHIP_KAVERI ||
- asic_type == CHIP_CARRIZO)
- kfd->device_info.needs_iommu_device = true;
-
if (asic_type != CHIP_HAWAII && !vf)
kfd->device_info.needs_pci_atomics = true;
}
@@ -240,7 +238,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
uint32_t gfx_target_version = 0;
switch (adev->asic_type) {
-#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
gfx_target_version = 70000;
@@ -253,7 +250,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v8_kfd2kgd;
break;
-#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_HAWAII:
gfx_target_version = 70001;
@@ -289,7 +285,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90000;
f2g = &gfx_v9_kfd2kgd;
break;
-#ifdef KFD_SUPPORT_IOMMU_V2
/* Raven */
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
@@ -297,7 +292,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
if (!vf)
f2g = &gfx_v9_kfd2kgd;
break;
-#endif
/* Vega12 */
case IP_VERSION(9, 2, 1):
gfx_target_version = 90004;
@@ -327,8 +321,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = 90400;
- f2g = &aldebaran_kfd2kgd;
+ gfx_target_version = adev->rev_id >= 1 ? 90402
+ : adev->flags & AMD_IS_APU ? 90400
+ : 90401;
+ f2g = &gc_9_4_3_kfd2kgd;
break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
@@ -406,8 +402,15 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
- gfx_target_version = 110001;
+ if ((adev->pdev->device == 0x7460 &&
+ adev->pdev->revision == 0x00) ||
+ (adev->pdev->device == 0x7461 &&
+ adev->pdev->revision == 0x00))
+ /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
+ gfx_target_version = 110005;
+ else
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
default:
@@ -437,10 +440,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
atomic_set(&kfd->compute_profile, 0);
mutex_init(&kfd->doorbell_mutex);
- memset(&kfd->doorbell_available_index, 0,
- sizeof(kfd->doorbell_available_index));
-
- atomic_set(&kfd->sram_ecc_flag, 0);
ida_init(&kfd->doorbell_ida);
@@ -488,41 +487,152 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
}
-static int kfd_gws_init(struct kfd_dev *kfd)
+static int kfd_gws_init(struct kfd_node *node)
{
int ret = 0;
+ struct kfd_dev *kfd = node->kfd;
+ uint32_t mes_rev = node->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
- if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
return 0;
- if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
- ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
+ if (hws_gws_support || (KFD_IS_SOC15(node) &&
+ ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
&& kfd->mec2_fw_version >= 0x81b3) ||
- (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
+ (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
&& kfd->mec2_fw_version >= 0x1b3) ||
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
&& kfd->mec2_fw_version >= 0x30) ||
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
&& kfd->mec2_fw_version >= 0x28) ||
- (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
- && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
- && kfd->mec2_fw_version >= 0x6b))))
- ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
- kfd->adev->gds.gws_size, &kfd->gws);
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3)) ||
+ (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
+ && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
+ && kfd->mec2_fw_version >= 0x6b) ||
+ (KFD_GC_VERSION(node) >= IP_VERSION(11, 0, 0)
+ && KFD_GC_VERSION(node) < IP_VERSION(12, 0, 0)
+ && mes_rev >= 68))))
+ ret = amdgpu_amdkfd_alloc_gws(node->adev,
+ node->adev->gds.gws_size, &node->gws);
return ret;
}
-static void kfd_smi_init(struct kfd_dev *dev)
+static void kfd_smi_init(struct kfd_node *dev)
{
INIT_LIST_HEAD(&dev->smi_clients);
spin_lock_init(&dev->smi_lock);
}
+static int kfd_init_node(struct kfd_node *node)
+{
+ int err = -1;
+
+ if (kfd_interrupt_init(node)) {
+ dev_err(kfd_device, "Error initializing interrupts\n");
+ goto kfd_interrupt_error;
+ }
+
+ node->dqm = device_queue_manager_init(node);
+ if (!node->dqm) {
+ dev_err(kfd_device, "Error initializing queue manager\n");
+ goto device_queue_manager_error;
+ }
+
+ if (kfd_gws_init(node)) {
+ dev_err(kfd_device, "Could not allocate %d gws\n",
+ node->adev->gds.gws_size);
+ goto gws_error;
+ }
+
+ if (kfd_resume(node))
+ goto kfd_resume_error;
+
+ if (kfd_topology_add_device(node)) {
+ dev_err(kfd_device, "Error adding device to topology\n");
+ goto kfd_topology_add_device_error;
+ }
+
+ kfd_smi_init(node);
+
+ return 0;
+
+kfd_topology_add_device_error:
+kfd_resume_error:
+gws_error:
+ device_queue_manager_uninit(node->dqm);
+device_queue_manager_error:
+ kfd_interrupt_exit(node);
+kfd_interrupt_error:
+ if (node->gws)
+ amdgpu_amdkfd_free_gws(node->adev, node->gws);
+
+ /* Cleanup the node memory here */
+ kfree(node);
+ return err;
+}
+
+static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
+{
+ struct kfd_node *knode;
+ unsigned int i;
+
+ for (i = 0; i < num_nodes; i++) {
+ knode = kfd->nodes[i];
+ device_queue_manager_uninit(knode->dqm);
+ kfd_interrupt_exit(knode);
+ kfd_topology_remove_device(knode);
+ if (knode->gws)
+ amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
+ kfree(knode);
+ kfd->nodes[i] = NULL;
+ }
+}
+
+static void kfd_setup_interrupt_bitmap(struct kfd_node *node,
+ unsigned int kfd_node_idx)
+{
+ struct amdgpu_device *adev = node->adev;
+ uint32_t xcc_mask = node->xcc_mask;
+ uint32_t xcc, mapped_xcc;
+ /*
+	 * The interrupt bitmap is set up for processing interrupts from
+	 * different XCDs and AIDs.
+	 * The interrupt bitmap is defined as follows:
+	 * 1. Bits 0-15 - correspond to the NodeId field.
+	 *    Each bit corresponds to a NodeId. For example, if
+ * a KFD node has interrupt bitmap set to 0x7, then this
+ * KFD node will process interrupts with NodeId = 0, 1 and 2
+ * in the IH cookie.
+ * 2. Bits 16-31 - unused.
+ *
+ * Please note that the kfd_node_idx argument passed to this
+ * function is not related to NodeId field received in the
+ * IH cookie.
+ *
+ * In CPX mode, a KFD node will process an interrupt if:
+ * - the Node Id matches the corresponding bit set in
+ * Bits 0-15.
+ * - AND VMID reported in the interrupt lies within the
+ * VMID range of the node.
+ */
+ for_each_inst(xcc, xcc_mask) {
+ mapped_xcc = GET_INST(GC, xcc);
+ node->interrupt_bitmap |= (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
+ }
+ dev_info(kfd_device, "Node: %d, interrupt_bitmap: %x\n", kfd_node_idx,
+ node->interrupt_bitmap);
+}
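Editorial note (not part of the patch): a worked example of the NodeId formula used in kfd_setup_interrupt_bitmap() above. The standalone sketch assumes mapped_xcc values 0-5 (i.e. an identity GET_INST() mapping) purely for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int mapped_xcc, bitmap;

	for (mapped_xcc = 0; mapped_xcc < 6; mapped_xcc++) {
		/* same expression as in kfd_setup_interrupt_bitmap() */
		bitmap = (mapped_xcc % 2 ? 5 : 3) << (4 * (mapped_xcc / 2));
		printf("mapped_xcc %u -> interrupt_bitmap 0x%03x\n",
		       mapped_xcc, bitmap);
	}
	/* Output:
	 * mapped_xcc 0 -> 0x003  (NodeIds 0, 1)
	 * mapped_xcc 1 -> 0x005  (NodeIds 0, 2)
	 * mapped_xcc 2 -> 0x030  (NodeIds 4, 5)
	 * mapped_xcc 3 -> 0x050  (NodeIds 4, 6)
	 * mapped_xcc 4 -> 0x300  (NodeIds 8, 9)
	 * mapped_xcc 5 -> 0x500  (NodeIds 8, 10)
	 */
	return 0;
}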
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
- unsigned int size, map_process_packet_size;
+ unsigned int size, map_process_packet_size, i;
+ struct kfd_node *node;
+ uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
+ unsigned int max_proc_per_quantum;
+ int partition_mode;
+ int xcp_idx;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
KGD_ENGINE_MEC1);
@@ -532,10 +642,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
- kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
- kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
- kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- - kfd->vm_info.first_vmid_kfd + 1;
+ kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);
+
+ if (kfd->num_nodes == 0) {
+ dev_err(kfd_device,
+ "KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
+ kfd->adev->gfx.num_xcc_per_xcp);
+ goto out;
+ }
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
* 32 and 64-bit requests are possible and must be
@@ -554,11 +668,34 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
return false;
}
+ first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
+
+ /* For GFX9.4.3, we need special handling for VMIDs depending on
+ * partition mode.
+ * In CPX mode, the VMID range needs to be shared between XCDs.
+ * Additionally, there are 13 VMIDs (3-15) available for KFD. To
+ * divide them equally, we change starting VMID to 4 and not use
+	 * divide them equally, we change the starting VMID to 4 and do not
+	 * use VMID 3.
+ * revisited.
+ */
+ if (kfd->adev->xcp_mgr) {
+ partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
+ AMDGPU_XCP_FL_LOCKED);
+ if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ kfd->num_nodes != 1) {
+ vmid_num_kfd /= 2;
+ first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
+ }
+ }
+
/* Verify module parameters regarding mapped process number*/
if (hws_max_conc_proc >= 0)
- kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+ max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
else
- kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
+ max_proc_per_quantum = vmid_num_kfd;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
@@ -606,81 +743,110 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (amdgpu_use_xgmi_p2p)
kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
- kfd->noretry = kfd->adev->gmc.noretry;
-
- if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device, "Error initializing interrupts\n");
- goto kfd_interrupt_error;
- }
-
- kfd->dqm = device_queue_manager_init(kfd);
- if (!kfd->dqm) {
- dev_err(kfd_device, "Error initializing queue manager\n");
- goto device_queue_manager_error;
- }
-
- /* If supported on this device, allocate global GWS that is shared
- * by all KFD processes
+ /*
+ * For GFX9.4.3, the KFD abstracts all partitions within a socket as
+	 * xGMI connected in the topology, so assign a unique hive id per
+	 * device based on the PCI device location if the device is in PCIe mode.
*/
- if (kfd_gws_init(kfd)) {
- dev_err(kfd_device, "Could not allocate %d gws\n",
- kfd->adev->gds.gws_size);
- goto gws_error;
- }
-
- /* If CRAT is broken, won't set iommu enabled */
- kfd_double_confirm_iommu_support(kfd);
+ if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1)
+ kfd->hive_id = pci_dev_id(kfd->adev->pdev);
- if (kfd_iommu_device_init(kfd)) {
- kfd->use_iommu_v2 = false;
- dev_err(kfd_device, "Error initializing iommuv2\n");
- goto device_iommu_error;
- }
+ kfd->noretry = kfd->adev->gmc.noretry;
kfd_cwsr_init(kfd);
- svm_migrate_init(kfd->adev);
+ dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
+ kfd->num_nodes);
+
+ /* Allocate the KFD nodes */
+ for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
+ node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
+ if (!node)
+ goto node_alloc_error;
+
+ node->node_id = i;
+ node->adev = kfd->adev;
+ node->kfd = kfd;
+ node->kfd2kgd = kfd->kfd2kgd;
+ node->vm_info.vmid_num_kfd = vmid_num_kfd;
+ node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
+ /* TODO : Check if error handling is needed */
+ if (node->xcp) {
+ amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
+ &node->xcc_mask);
+ ++xcp_idx;
+ } else {
+ node->xcc_mask =
+ (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
+ }
- if (kfd_resume_iommu(kfd))
- goto device_iommu_error;
+ if (node->xcp) {
+ dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
+ node->node_id, node->xcp->mem_id,
+ KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
+ }
- if (kfd_resume(kfd))
- goto kfd_resume_error;
+ if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
+ partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ kfd->num_nodes != 1) {
+ /* For GFX9.4.3 and CPX mode, first XCD gets VMID range
+ * 4-9 and second XCD gets VMID range 10-15.
+ */
+
+ node->vm_info.first_vmid_kfd = (i%2 == 0) ?
+ first_vmid_kfd :
+ first_vmid_kfd+vmid_num_kfd;
+ node->vm_info.last_vmid_kfd = (i%2 == 0) ?
+ last_vmid_kfd-vmid_num_kfd :
+ last_vmid_kfd;
+ node->compute_vmid_bitmap =
+ ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
+ ((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
+ } else {
+ node->vm_info.first_vmid_kfd = first_vmid_kfd;
+ node->vm_info.last_vmid_kfd = last_vmid_kfd;
+ node->compute_vmid_bitmap =
+ gpu_resources->compute_vmid_bitmap;
+ }
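Editorial note (not part of the patch): a standalone sketch that works through the GFX 9.4.3 CPX VMID split computed just above, assuming the usual compute VMID range 3-15 (13 VMIDs) and two KFD nodes; names are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned int first_vmid_kfd = 3, last_vmid_kfd = 15;
	unsigned int vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; /* 13 */
	unsigned int i, first, last, bitmap;

	/* CPX: split the range between two XCDs, dropping VMID 3 */
	vmid_num_kfd /= 2;					/* 6 */
	first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd * 2;	/* 4 */

	for (i = 0; i < 2; i++) {
		first = (i % 2 == 0) ? first_vmid_kfd : first_vmid_kfd + vmid_num_kfd;
		last  = (i % 2 == 0) ? last_vmid_kfd - vmid_num_kfd : last_vmid_kfd;
		bitmap = ((1u << (last + 1)) - 1) - ((1u << first) - 1);
		printf("node %u: VMIDs %u-%u, bitmap 0x%04x\n", i, first, last, bitmap);
	}
	/* node 0: VMIDs 4-9,   bitmap 0x03f0
	 * node 1: VMIDs 10-15, bitmap 0xfc00
	 */
	return 0;
}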
+ node->max_proc_per_quantum = max_proc_per_quantum;
+ atomic_set(&node->sram_ecc_flag, 0);
- amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(kfd->adev,
+ &node->local_mem_info, node->xcp);
- if (kfd_topology_add_device(kfd)) {
- dev_err(kfd_device, "Error adding device to topology\n");
- goto kfd_topology_add_device_error;
+ if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3))
+ kfd_setup_interrupt_bitmap(node, i);
+
+ /* Initialize the KFD node */
+ if (kfd_init_node(node)) {
+ dev_err(kfd_device, "Error initializing KFD node\n");
+ goto node_init_error;
+ }
+ kfd->nodes[i] = node;
}
- kfd_smi_init(kfd);
+ svm_range_set_max_pages(kfd->adev);
+
+ spin_lock_init(&kfd->watch_points_lock);
kfd->init_complete = true;
dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
kfd->adev->pdev->device);
pr_debug("Starting kfd with the following scheduling policy %d\n",
- kfd->dqm->sched_policy);
+ node->dqm->sched_policy);
goto out;
-kfd_topology_add_device_error:
-kfd_resume_error:
-device_iommu_error:
-gws_error:
- device_queue_manager_uninit(kfd->dqm);
-device_queue_manager_error:
- kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
+node_init_error:
+node_alloc_error:
+ kfd_cleanup_nodes(kfd, i);
kfd_doorbell_fini(kfd);
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
- if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->adev->pdev->vendor, kfd->adev->pdev->device);
@@ -691,15 +857,13 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- device_queue_manager_uninit(kfd->dqm);
- kfd_interrupt_exit(kfd);
- kfd_topology_remove_device(kfd);
+ /* Cleanup KFD nodes */
+ kfd_cleanup_nodes(kfd, kfd->num_nodes);
+ /* Cleanup common/shared resources */
kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
- if (kfd->gws)
- amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
}
kfree(kfd);
@@ -707,16 +871,23 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
+ struct kfd_node *node;
+ int i;
+
if (!kfd->init_complete)
return 0;
- kfd_smi_event_update_gpu_reset(kfd, false);
-
- kfd->dqm->ops.pre_reset(kfd->dqm);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ kfd_smi_event_update_gpu_reset(node, false);
+ node->dqm->ops.pre_reset(node->dqm);
+ }
kgd2kfd_suspend(kfd, false);
- kfd_signal_reset_event(kfd);
+ for (i = 0; i < kfd->num_nodes; i++)
+ kfd_signal_reset_event(kfd->nodes[i]);
+
return 0;
}
@@ -729,57 +900,82 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
int ret;
+ struct kfd_node *node;
+ int i;
if (!kfd->init_complete)
return 0;
- ret = kfd_resume(kfd);
- if (ret)
- return ret;
- atomic_dec(&kfd_locked);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ ret = kfd_resume(kfd->nodes[i]);
+ if (ret)
+ return ret;
+ }
- atomic_set(&kfd->sram_ecc_flag, 0);
+ mutex_lock(&kfd_processes_mutex);
+ --kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
- kfd_smi_event_update_gpu_reset(kfd, true);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ atomic_set(&node->sram_ecc_flag, 0);
+ kfd_smi_event_update_gpu_reset(node, true);
+ }
return 0;
}
bool kfd_is_locked(void)
{
- return (atomic_read(&kfd_locked) > 0);
+ lockdep_assert_held(&kfd_processes_mutex);
+ return (kfd_locked > 0);
}
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
+ struct kfd_node *node;
+ int i;
+ int count;
+
if (!kfd->init_complete)
return;
/* for runtime suspend, skip locking kfd */
if (!run_pm) {
+ mutex_lock(&kfd_processes_mutex);
+ count = ++kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
+
/* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_locked) == 1)
+ if (count == 1)
kfd_suspend_all_processes();
}
- kfd->dqm->ops.stop(kfd->dqm);
- kfd_iommu_suspend(kfd);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ node->dqm->ops.stop(node->dqm);
+ }
}
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
- int ret, count;
+ int ret, count, i;
if (!kfd->init_complete)
return 0;
- ret = kfd_resume(kfd);
- if (ret)
- return ret;
+ for (i = 0; i < kfd->num_nodes; i++) {
+ ret = kfd_resume(kfd->nodes[i]);
+ if (ret)
+ return ret;
+ }
/* for runtime resume, skip unlocking kfd */
if (!run_pm) {
- count = atomic_dec_return(&kfd_locked);
+ mutex_lock(&kfd_processes_mutex);
+ count = --kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
+
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
@@ -788,35 +984,15 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
-int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
-{
- if (!kfd->init_complete)
- return 0;
-
- return kfd_resume_iommu(kfd);
-}
-
-static int kfd_resume_iommu(struct kfd_dev *kfd)
-{
- int err = 0;
-
- err = kfd_iommu_resume(kfd);
- if (err)
- dev_err(kfd_device,
- "Failed to resume IOMMU for device %x:%x\n",
- kfd->adev->pdev->vendor, kfd->adev->pdev->device);
- return err;
-}
-
-static int kfd_resume(struct kfd_dev *kfd)
+static int kfd_resume(struct kfd_node *node)
{
int err = 0;
- err = kfd->dqm->ops.start(kfd->dqm);
+ err = node->dqm->ops.start(node->dqm);
if (err)
dev_err(kfd_device,
"Error starting queue manager for device %x:%x\n",
- kfd->adev->pdev->vendor, kfd->adev->pdev->device);
+ node->adev->pdev->vendor, node->adev->pdev->device);
return err;
}
@@ -839,9 +1015,10 @@ static inline void kfd_queue_work(struct workqueue_struct *wq,
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+ uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
bool is_patched = false;
unsigned long flags;
+ struct kfd_node *node;
if (!kfd->init_complete)
return;
@@ -851,16 +1028,22 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
return;
}
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
-
- if (kfd->interrupts_active
- && interrupt_is_wanted(kfd, ih_ring_entry,
- patched_ihre, &is_patched)
- && enqueue_ih_ring_entry(kfd,
- is_patched ? patched_ihre : ih_ring_entry))
- kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ spin_lock_irqsave(&node->interrupt_lock, flags);
+
+ if (node->interrupts_active
+ && interrupt_is_wanted(node, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(node,
+ is_patched ? patched_ihre : ih_ring_entry)) {
+ kfd_queue_work(node->ih_wq, &node->interrupt_work);
+ spin_unlock_irqrestore(&node->interrupt_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&node->interrupt_lock, flags);
+ }
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
@@ -998,10 +1181,11 @@ static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
-int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
struct kfd_mem_obj **mem_obj)
{
unsigned int found, start_search, cur_size;
+ struct kfd_dev *kfd = node->kfd;
if (size == 0)
return -EINVAL;
@@ -1101,8 +1285,10 @@ kfd_gtt_no_free_chunk:
return -ENOMEM;
}
-int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
+int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
{
+ struct kfd_dev *kfd = node->kfd;
+
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
@@ -1124,29 +1310,40 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
+ /*
+ * TODO: Currently update SRAM ECC flag for first node.
+ * This needs to be updated later when we can
+ * identify SRAM ECC error on other nodes also.
+ */
if (kfd)
- atomic_inc(&kfd->sram_ecc_flag);
+ atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
}
-void kfd_inc_compute_active(struct kfd_dev *kfd)
+void kfd_inc_compute_active(struct kfd_node *node)
{
- if (atomic_inc_return(&kfd->compute_profile) == 1)
- amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
+ if (atomic_inc_return(&node->kfd->compute_profile) == 1)
+ amdgpu_amdkfd_set_compute_idle(node->adev, false);
}
-void kfd_dec_compute_active(struct kfd_dev *kfd)
+void kfd_dec_compute_active(struct kfd_node *node)
{
- int count = atomic_dec_return(&kfd->compute_profile);
+ int count = atomic_dec_return(&node->kfd->compute_profile);
if (count == 0)
- amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
+ amdgpu_amdkfd_set_compute_idle(node->adev, true);
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
+ /*
+ * TODO: For now, raise the throttling event only on first node.
+ * This will need to change after we are able to determine
+ * which node raised the throttling event.
+ */
if (kfd && kfd->init_complete)
- kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
+ kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
+ throttle_bitmask);
}
/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
@@ -1154,19 +1351,41 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
* When the device has more than two engines, we reserve two for PCIe to enable
* full-duplex and the rest are used as XGMI.
*/
-unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
+unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
{
/* If XGMI is not supported, all SDMA engines are PCIe */
- if (!kdev->adev->gmc.xgmi.supported)
- return kdev->adev->sdma.num_instances;
+ if (!node->adev->gmc.xgmi.supported)
+ return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;
- return min(kdev->adev->sdma.num_instances, 2);
+ return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
}
-unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
{
/* After reserved for PCIe, the rest of engines are XGMI */
- return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
+ return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
+ kfd_get_num_sdma_engines(node);
+}
+
+int kgd2kfd_check_and_lock_kfd(void)
+{
+ mutex_lock(&kfd_processes_mutex);
+ if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
+ mutex_unlock(&kfd_processes_mutex);
+ return -EBUSY;
+ }
+
+ ++kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
+
+ return 0;
+}
+
+void kgd2kfd_unlock_kfd(void)
+{
+ mutex_lock(&kfd_processes_mutex);
+ --kfd_locked;
+ mutex_unlock(&kfd_processes_mutex);
}
#if defined(CONFIG_DEBUG_FS)
@@ -1174,7 +1393,7 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
/* This function will send a package to HIQ to hang the HWS
* which will trigger a GPU reset and bring the HWS back to normal state
*/
-int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+int kfd_debugfs_hang_hws(struct kfd_node *dev)
{
if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
pr_err("HWS is not enabled");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 7a95698d83f7..b166f30f083e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,6 +36,7 @@
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "mes_api_def.h"
+#include "kfd_debug.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
@@ -46,10 +47,13 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param);
+ uint32_t filter_param,
+ uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset);
+ uint32_t filter_param,
+ uint32_t grace_period,
+ bool reset);
static int map_queues_cpsch(struct device_queue_manager *dqm);
@@ -74,31 +78,31 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
int i;
- int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
- + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
+ int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
+ + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
- for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
+ for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
- dqm->dev->shared_resources.cp_queue_bitmap))
+ dqm->dev->kfd->shared_resources.cp_queue_bitmap))
return true;
return false;
}
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
- return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
+ return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
- return dqm->dev->shared_resources.num_queue_per_pipe;
+ return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
- return dqm->dev->shared_resources.num_pipe_per_mec;
+ return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
}
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
@@ -110,29 +114,40 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return kfd_get_num_sdma_engines(dqm->dev) *
- dqm->dev->device_info.num_sdma_queues_per_engine;
+ dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
- dqm->dev->device_info.num_sdma_queues_per_engine;
+ dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}
-static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm)
+static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{
- return dqm->dev->device_info.reserved_sdma_queues_bitmap;
+ bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
+ bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));
+
+ bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
+ bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
+
+ /* Mask out the reserved queues */
+ bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
+ dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
+ KFD_MAX_SDMA_QUEUES);
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- return dqm->dev->kfd2kgd->program_sh_mem_settings(
- dqm->dev->adev, qpd->vmid,
- qpd->sh_mem_config,
- qpd->sh_mem_ape1_base,
- qpd->sh_mem_ape1_limit,
- qpd->sh_mem_bases);
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ int xcc_id;
+
+ for_each_inst(xcc_id, xcc_mask)
+ dqm->dev->kfd2kgd->program_sh_mem_settings(
+ dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
+ qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
+ qpd->sh_mem_bases, xcc_id);
}
static void kfd_hws_hang(struct device_queue_manager *dqm)
@@ -211,6 +226,9 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
queue_input.tma_addr = qpd->tma_addr;
+ queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
+ queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled ||
+ kfd_dbg_has_ttmps_always_setup(q->device);
queue_type = convert_to_mes_queue_type(q->properties.type);
if (queue_type < 0) {
@@ -220,10 +238,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.queue_type = (uint32_t)queue_type;
- if (q->gws) {
- queue_input.gws_base = 0;
- queue_input.gws_size = qpd->num_gws;
- }
+ queue_input.exclusively_scheduled = q->properties.is_gws;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
@@ -233,7 +248,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
q->properties.doorbell_off);
pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
kfd_hws_hang(dqm);
-}
+ }
return r;
}
@@ -330,7 +345,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
struct queue *q,
uint32_t const *restore_id)
{
- struct kfd_dev *dev = qpd->dqm->dev;
+ struct kfd_node *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev)) {
/* On pre-SOC15 chips we need to use the queue ID to
@@ -349,8 +364,17 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
* for a SDMA engine is 512.
*/
- uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx;
- uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
+ uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
+
+ /*
+ * q->properties.sdma_engine_id corresponds to the virtual
+ * sdma engine number. However, for doorbell allocation,
+ * we need the physical sdma engine id in order to get the
+ * correct doorbell offset.
+ */
+ uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
+ get_num_all_sdma_engines(qpd->dqm) +
+ q->properties.sdma_engine_id]
+ (q->properties.sdma_queue_id & 1)
* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+ (q->properties.sdma_queue_id >> 1);
@@ -371,7 +395,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
unsigned int found;
found = find_first_zero_bit(qpd->doorbell_bitmap,
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_debug("No doorbells available");
return -EBUSY;
@@ -381,9 +405,9 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
}
}
- q->properties.doorbell_off =
- kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
- q->doorbell_id);
+ q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
+ qpd->proc_doorbells,
+ q->doorbell_id);
return 0;
}
@@ -391,7 +415,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
struct queue *q)
{
unsigned int old;
- struct kfd_dev *dev = qpd->dqm->dev;
+ struct kfd_node *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev) ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
@@ -405,10 +429,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
static void program_trap_handler_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ int xcc_id;
+
if (dqm->dev->kfd2kgd->program_trap_handler_settings)
- dqm->dev->kfd2kgd->program_trap_handler_settings(
- dqm->dev->adev, qpd->vmid,
- qpd->tba_addr, qpd->tma_addr);
+ for_each_inst(xcc_id, xcc_mask)
+ dqm->dev->kfd2kgd->program_trap_handler_settings(
+ dqm->dev->adev, qpd->vmid, qpd->tba_addr,
+ qpd->tma_addr, xcc_id);
}
static int allocate_vmid(struct device_queue_manager *dqm,
@@ -441,7 +469,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
program_sh_mem_settings(dqm, qpd);
- if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
+ if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
program_trap_handler_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -460,7 +488,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
return 0;
}
-static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
+static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
struct qcm_process_device *qpd)
{
const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
@@ -661,7 +689,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
#define SQ_IND_CMD_CMD_KILL 0x00000003
#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
-static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
+static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
@@ -671,6 +699,8 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process
struct kfd_process_device *pdd;
int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
+ uint32_t xcc_mask = dev->xcc_mask;
+ int xcc_id;
reg_sq_cmd.u32All = 0;
reg_gfx_index.u32All = 0;
@@ -715,9 +745,10 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process
reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
reg_sq_cmd.bits.vm_id = vmid;
- dev->kfd2kgd->wave_control_execute(dev->adev,
- reg_gfx_index.u32All,
- reg_sq_cmd.u32All);
+ for_each_inst(xcc_id, xcc_mask)
+ dev->kfd2kgd->wave_control_execute(
+ dev->adev, reg_gfx_index.u32All,
+ reg_sq_cmd.u32All, xcc_id);
return 0;
}
@@ -837,9 +868,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
/* Make sure the queue is unmapped before updating the MQD */
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = unmap_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else if (prev_active)
retval = remove_queue_mes(dqm, q, &pdd->qpd);
@@ -858,7 +889,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
}
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
- (dqm->dev->cwsr_enabled ?
+ (dqm->dev->kfd->cwsr_enabled ?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -895,7 +926,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
}
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active)
retval = add_queue_mes(dqm, q, &pdd->qpd);
@@ -917,6 +948,92 @@ out_unlock:
return retval;
}
+/* suspend_single_queue does not lock the dqm like the functions
+ * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should
+ * lock the dqm before calling, and unlock after calling.
+ *
+ * The reason we don't lock the dqm is because this function may be
+ * called on multiple queues in a loop, so rather than locking/unlocking
+ * multiple times, we will just keep the dqm locked for all of the calls.
+ */
+static int suspend_single_queue(struct device_queue_manager *dqm,
+ struct kfd_process_device *pdd,
+ struct queue *q)
+{
+ bool is_new;
+
+ if (q->properties.is_suspended)
+ return 0;
+
+ pr_debug("Suspending PASID %u queue [%i]\n",
+ pdd->process->pasid,
+ q->properties.queue_id);
+
+ is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
+
+ if (is_new || q->properties.is_being_destroyed) {
+ pr_debug("Suspend: skip %s queue id %i\n",
+ is_new ? "new" : "destroyed",
+ q->properties.queue_id);
+ return -EBUSY;
+ }
+
+ q->properties.is_suspended = true;
+ if (q->properties.is_active) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ int r = remove_queue_mes(dqm, q, &pdd->qpd);
+
+ if (r)
+ return r;
+ }
+
+ decrement_queue_count(dqm, &pdd->qpd, q);
+ q->properties.is_active = false;
+ }
+
+ return 0;
+}
+
+/* resume_single_queue does not lock the dqm like the functions
+ * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should
+ * lock the dqm before calling, and unlock after calling.
+ *
+ * The reason we don't lock the dqm is because this function may be
+ * called on multiple queues in a loop, so rather than locking/unlocking
+ * multiple times, we will just keep the dqm locked for all of the calls.
+ */
+static int resume_single_queue(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ struct kfd_process_device *pdd;
+
+ if (!q->properties.is_suspended)
+ return 0;
+
+ pdd = qpd_to_pdd(qpd);
+
+ pr_debug("Restoring from suspend PASID %u queue [%i]\n",
+ pdd->process->pasid,
+ q->properties.queue_id);
+
+ q->properties.is_suspended = false;
+
+ if (QUEUE_IS_ACTIVE(q->properties)) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ int r = add_queue_mes(dqm, q, &pdd->qpd);
+
+ if (r)
+ return r;
+ }
+
+ q->properties.is_active = true;
+ increment_queue_count(dqm, qpd, q);
+ }
+
+ return 0;
+}
+
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -951,7 +1068,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
continue;
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
- (dqm->dev->cwsr_enabled ?
+ (dqm->dev->kfd->cwsr_enabled ?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -979,6 +1096,14 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
+
+ /* The debugger creates processes that temporarily have not acquired
+ * all VMs for all devices and has no VMs itself.
+ * Skip queue eviction on process eviction.
+ */
+ if (!pdd->drm_priv)
+ goto out;
+
pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
@@ -993,7 +1118,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
q->properties.is_active = false;
decrement_queue_count(dqm, qpd, q);
- if (dqm->dev->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("Failed to evict queue %d\n",
@@ -1003,11 +1128,12 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
}
}
pdd->last_evict_timestamp = get_jiffies_64();
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
out:
dqm_unlock(dqm);
@@ -1100,13 +1226,10 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
{
struct queue *q;
struct kfd_process_device *pdd;
- uint64_t pd_base;
uint64_t eviction_duration;
int retval = 0;
pdd = qpd_to_pdd(qpd);
- /* Retrieve PD base */
- pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -1116,12 +1239,19 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
+ /* The debugger creates processes that temporarily have not acquired
+ * all VMs for all devices and has no VMs itself.
+ * Skip queue restore on process restore.
+ */
+ if (!pdd->drm_priv)
+ goto vm_not_acquired;
+
pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
- qpd->page_table_base = pd_base;
- pr_debug("Updated PD address to 0x%llx\n", pd_base);
+ qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
+ pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
@@ -1132,7 +1262,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
q->properties.is_active = true;
increment_queue_count(dqm, &pdd->qpd, q);
- if (dqm->dev->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = add_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("Failed to restore queue %d\n",
@@ -1141,12 +1271,13 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
}
}
}
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- qpd->evicted = 0;
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
atomic64_add(eviction_duration, &pdd->evict_duration_counter);
+vm_not_acquired:
+ qpd->evicted = 0;
out:
dqm_unlock(dqm);
return retval;
@@ -1229,35 +1360,32 @@ static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
unsigned int vmid)
{
- return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
- dqm->dev->adev, pasid, vmid);
-}
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ int xcc_id, ret;
-static void init_interrupts(struct device_queue_manager *dqm)
-{
- unsigned int i;
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
+ dqm->dev->adev, pasid, vmid, xcc_id);
+ if (ret)
+ break;
+ }
- for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
- if (is_pipe_enabled(dqm, 0, i))
- dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
+ return ret;
}
-static void init_sdma_bitmaps(struct device_queue_manager *dqm)
+static void init_interrupts(struct device_queue_manager *dqm)
{
- unsigned int num_sdma_queues =
- min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
- get_num_sdma_queues(dqm));
- unsigned int num_xgmi_sdma_queues =
- min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
- get_num_xgmi_sdma_queues(dqm));
-
- if (num_sdma_queues)
- dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
- if (num_xgmi_sdma_queues)
- dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
+ unsigned int i, xcc_id;
- dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
+ for_each_inst(xcc_id, xcc_mask) {
+ for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
+ if (is_pipe_enabled(dqm, 0, i)) {
+ dqm->dev->kfd2kgd->init_interrupts(
+ dqm->dev->adev, i, xcc_id);
+ }
+ }
+ }
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
@@ -1282,7 +1410,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.cp_queue_bitmap))
+ dqm->dev->kfd->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
@@ -1322,9 +1450,16 @@ static int start_nocpsch(struct device_queue_manager *dqm)
static int stop_nocpsch(struct device_queue_manager *dqm)
{
+ dqm_lock(dqm);
+ if (!dqm->sched_running) {
+ dqm_unlock(dqm);
+ return 0;
+ }
+
if (dqm->dev->adev->asic_type == CHIP_HAWAII)
pm_uninit(&dqm->packet_mgr, false);
dqm->sched_running = false;
+ dqm_unlock(dqm);
return 0;
}
@@ -1342,46 +1477,48 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (dqm->sdma_bitmap == 0) {
+ if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
pr_err("No more SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
- if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
+ if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
pr_err("SDMA queue already in use\n");
return -EBUSY;
}
- dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+ clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
q->sdma_id = *restore_sdma_id;
} else {
/* Find first available sdma_id */
- bit = __ffs64(dqm->sdma_bitmap);
- dqm->sdma_bitmap &= ~(1ULL << bit);
+ bit = find_first_bit(dqm->sdma_bitmap,
+ get_num_sdma_queues(dqm));
+ clear_bit(bit, dqm->sdma_bitmap);
q->sdma_id = bit;
}
- q->properties.sdma_engine_id = q->sdma_id %
- kfd_get_num_sdma_engines(dqm->dev);
+ q->properties.sdma_engine_id =
+ q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (dqm->xgmi_sdma_bitmap == 0) {
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
- if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
+ if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
pr_err("SDMA queue already in use\n");
return -EBUSY;
}
- dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+ clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
q->sdma_id = *restore_sdma_id;
} else {
- bit = __ffs64(dqm->xgmi_sdma_bitmap);
- dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
+ bit = find_first_bit(dqm->xgmi_sdma_bitmap,
+ get_num_xgmi_sdma_queues(dqm));
+ clear_bit(bit, dqm->xgmi_sdma_bitmap);
q->sdma_id = bit;
}
/* sdma_engine_id is sdma id including
@@ -1409,11 +1546,11 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (q->sdma_id >= get_num_sdma_queues(dqm))
return;
- dqm->sdma_bitmap |= (1ULL << q->sdma_id);
+ set_bit(q->sdma_id, dqm->sdma_bitmap);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
return;
- dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
+ set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
}
}
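The hunks above complete the switch from open-coded u64 masks to the kernel bitmap API for SDMA queue bookkeeping, removing the implicit 64-queue ceiling. A minimal sketch of the resulting allocate/free pattern follows; it is illustrative only and not part of this patch (the helper name is made up).

	/* Illustrative only: claim the first free SDMA slot, release it on teardown. */
	static int example_sdma_slot(struct device_queue_manager *dqm)
	{
		unsigned int bit;

		if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES))
			return -ENOMEM;

		bit = find_first_bit(dqm->sdma_bitmap, get_num_sdma_queues(dqm));
		clear_bit(bit, dqm->sdma_bitmap);	/* mark slot as in use */

		/* ... hand slot "bit" to a queue ... */

		set_bit(bit, dqm->sdma_bitmap);		/* free the slot again */
		return 0;
	}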
@@ -1426,14 +1563,14 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
+ res.vmid_mask = dqm->dev->compute_vmid_bitmap;
res.queue_mask = 0;
for (i = 0; i < KGD_MAX_QUEUES; ++i) {
- mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
- / dqm->dev->shared_resources.num_pipe_per_mec;
+ mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
+ / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
- if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
+ if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
@@ -1475,9 +1612,14 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ dqm->trap_debug_vmid = 0;
init_sdma_bitmaps(dqm);
+ if (dqm->dev->kfd2kgd->get_iq_wait_times)
+ dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
+ &dqm->wait_times,
+ ffs(dqm->dev->xcc_mask) - 1);
return 0;
}
@@ -1489,7 +1631,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
- if (!dqm->dev->shared_resources.enable_mes) {
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = pm_init(&dqm->packet_mgr, dqm);
if (retval)
goto fail_packet_manager_init;
@@ -1516,14 +1658,35 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->is_hws_hang = false;
dqm->is_resetting = false;
dqm->sched_running = true;
- if (!dqm->dev->shared_resources.enable_mes)
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
+
+	/* Set CWSR grace period to 1x1000 cycles for the GFX9.4.3 APU */
+ if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
+ (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
+ uint32_t reg_offset = 0;
+ uint32_t grace_period = 1;
+
+ retval = pm_update_grace_period(&dqm->packet_mgr,
+ grace_period);
+ if (retval)
+ pr_err("Setting grace timeout failed\n");
+ else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
+ /* Update dqm->wait_times maintained in software */
+ dqm->dev->kfd2kgd->build_grace_period_packet_info(
+ dqm->dev->adev, dqm->wait_times,
+ grace_period, &reg_offset,
+ &dqm->wait_times,
+ ffs(dqm->dev->xcc_mask) - 1);
+ }
+
dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr, false);
fail_packet_manager_init:
dqm_unlock(dqm);
@@ -1541,8 +1704,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->is_hws_hang) {
- if (!dqm->dev->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else
remove_all_queues_mes(dqm);
}
@@ -1550,11 +1713,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_release_ib(&dqm->packet_mgr);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr, hanging);
dqm_unlock(dqm);
@@ -1584,7 +1747,8 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_add(&kq->list, &qpd->priv_queue_list);
increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return 0;
@@ -1598,7 +1762,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_del(&kq->list);
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
@@ -1658,6 +1823,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
* updates the is_evicted flag but is a no-op otherwise.
*/
q->properties.is_evicted = !!qpd->evicted;
+ q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
+ kfd_dbg_has_cwsr_workaround(q->device);
if (qd)
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
@@ -1673,9 +1840,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.is_active) {
increment_queue_count(dqm, qpd, q);
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
else
retval = add_queue_mes(dqm, q, qpd);
if (retval)
@@ -1764,7 +1931,9 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset)
+ uint32_t filter_param,
+ uint32_t grace_period,
+ bool reset)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
@@ -1776,6 +1945,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
+ if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
+ retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
+ if (retval)
+ return retval;
+ }
+
retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
if (retval)
return retval;
@@ -1808,6 +1983,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
return -ETIME;
}
+ /* We need to reset the grace period value for this device */
+ if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
+ if (pm_update_grace_period(&dqm->packet_mgr,
+ USE_DEFAULT_GRACE_PERIOD))
+ pr_err("Failed to reset grace period\n");
+ }
+
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
@@ -1823,7 +2005,7 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm,
dqm_lock(dqm);
retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
- pasid, true);
+ pasid, USE_DEFAULT_GRACE_PERIOD, true);
dqm_unlock(dqm);
return retval;
@@ -1832,19 +2014,45 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm,
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
- uint32_t filter_param)
+ uint32_t filter_param,
+ uint32_t grace_period)
{
int retval;
if (dqm->is_hws_hang)
return -EIO;
- retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
if (retval)
return retval;
return map_queues_cpsch(dqm);
}
+static int wait_on_destroy_queue(struct device_queue_manager *dqm,
+ struct queue *q)
+{
+ struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
+ q->process);
+ int ret = 0;
+
+ if (pdd->qpd.is_debug)
+ return ret;
+
+ q->properties.is_being_destroyed = true;
+
+ if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
+ dqm_unlock(dqm);
+ mutex_unlock(&q->process->mutex);
+ ret = wait_event_interruptible(dqm->destroy_wait,
+ !q->properties.is_suspended);
+
+ mutex_lock(&q->process->mutex);
+ dqm_lock(dqm);
+ }
+
+ return ret;
+}
+
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
@@ -1864,11 +2072,16 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
q->properties.queue_id);
}
- retval = 0;
-
/* remove queue from list to prevent rescheduling after preemption */
dqm_lock(dqm);
+ retval = wait_on_destroy_queue(dqm, q);
+
+ if (retval) {
+ dqm_unlock(dqm);
+ return retval;
+ }
+
if (qpd->is_debug) {
/*
* error, currently we do not allow to destroy a queue
@@ -1893,10 +2106,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- if (!dqm->dev->shared_resources.enable_mes) {
- decrement_queue_count(dqm, qpd, q);
+ decrement_queue_count(dqm, qpd, q);
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
} else {
@@ -1914,7 +2128,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
dqm_unlock(dqm);
- /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
+ /*
+ * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid
+ * circular locking
+ */
+ kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
+ qpd->pqm->process, q->device,
+ -1, false, NULL, 0);
+
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
return retval;
@@ -2056,7 +2277,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.is_active || !q->device->cwsr_enabled ||
+ q->properties.is_active || !q->device->kfd->cwsr_enabled ||
!mqd_mgr->get_wave_state) {
dqm_unlock(dqm);
return -EINVAL;
@@ -2069,8 +2290,8 @@ static int get_wave_state(struct device_queue_manager *dqm,
* and the queue should be protected against destruction by the process
* lock.
*/
- return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
- ctl_stack_used_size, save_area_used_size);
+ return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
+ ctl_stack, ctl_stack_used_size, save_area_used_size);
}
static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
@@ -2105,7 +2326,7 @@ static int checkpoint_mqd(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (q->properties.is_active || !q->device->cwsr_enabled) {
+ if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
r = -EINVAL;
goto dqm_unlock;
}
@@ -2158,7 +2379,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
if (q->properties.is_active) {
decrement_queue_count(dqm, qpd, q);
- if (dqm->dev->shared_resources.enable_mes) {
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval)
pr_err("Failed to remove queue %d\n",
@@ -2180,8 +2401,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
}
- if (!dqm->dev->shared_resources.enable_mes)
- retval = execute_queues_cpsch(dqm, filter, 0);
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
+ retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
@@ -2242,12 +2463,13 @@ out_free:
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
int retval;
- struct kfd_dev *dev = dqm->dev;
+ struct kfd_node *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
get_num_all_sdma_engines(dqm) *
- dev->device_info.num_sdma_queues_per_engine +
- dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+ dev->kfd->device_info.num_sdma_queues_per_engine +
+ (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
+ NUM_XCC(dqm->dev->xcc_mask));
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
@@ -2256,7 +2478,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
return retval;
}
-struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
@@ -2335,32 +2557,26 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
}
switch (dev->adev->asic_type) {
- case CHIP_CARRIZO:
- device_queue_manager_init_vi(&dqm->asic_ops);
- break;
-
case CHIP_KAVERI:
- device_queue_manager_init_cik(&dqm->asic_ops);
- break;
-
case CHIP_HAWAII:
- device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
+ device_queue_manager_init_cik(&dqm->asic_ops);
break;
+ case CHIP_CARRIZO:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
- device_queue_manager_init_vi_tonga(&dqm->asic_ops);
+ device_queue_manager_init_vi(&dqm->asic_ops);
break;
default:
if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
device_queue_manager_init_v11(&dqm->asic_ops);
else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
- device_queue_manager_init_v10_navi10(&dqm->asic_ops);
+ device_queue_manager_init_v10(&dqm->asic_ops);
else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
device_queue_manager_init_v9(&dqm->asic_ops);
else {
@@ -2373,20 +2589,22 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
if (init_mqd_managers(dqm))
goto out_free;
- if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
+ if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
- if (!dqm->ops.initialize(dqm))
+ if (!dqm->ops.initialize(dqm)) {
+ init_waitqueue_head(&dqm->destroy_wait);
return dqm;
+ }
out_free:
kfree(dqm);
return NULL;
}
-static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
+static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
@@ -2396,8 +2614,9 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
+ dqm->ops.stop(dqm);
dqm->ops.uninitialize(dqm);
- if (!dqm->dev->shared_resources.enable_mes)
+ if (!dqm->dev->kfd->shared_resources.enable_mes)
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}
@@ -2426,6 +2645,490 @@ static void kfd_process_hw_exception(struct work_struct *work)
amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
+int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int r;
+ int updated_vmid_mask;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ dqm_lock(dqm);
+
+ if (dqm->trap_debug_vmid != 0) {
+ pr_err("Trap debug id already reserved\n");
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
+ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD, false);
+ if (r)
+ goto out_unlock;
+
+ updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
+ updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
+
+ dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
+ dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
+ r = set_sched_resources(dqm);
+ if (r)
+ goto out_unlock;
+
+ r = map_queues_cpsch(dqm);
+ if (r)
+ goto out_unlock;
+
+ pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
+
+out_unlock:
+ dqm_unlock(dqm);
+ return r;
+}
+
+/*
+ * Releases vmid for the trap debugger
+ */
+int release_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int r;
+ int updated_vmid_mask;
+ uint32_t trap_debug_vmid;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ dqm_lock(dqm);
+ trap_debug_vmid = dqm->trap_debug_vmid;
+ if (dqm->trap_debug_vmid == 0) {
+ pr_err("Trap debug id is not reserved\n");
+ r = -EINVAL;
+ goto out_unlock;
+ }
+
+ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
+ USE_DEFAULT_GRACE_PERIOD, false);
+ if (r)
+ goto out_unlock;
+
+ updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
+ updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
+
+ dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
+ dqm->trap_debug_vmid = 0;
+ r = set_sched_resources(dqm);
+ if (r)
+ goto out_unlock;
+
+ r = map_queues_cpsch(dqm);
+ if (r)
+ goto out_unlock;
+
+ pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
+
+out_unlock:
+ dqm_unlock(dqm);
+ return r;
+}
+
+#define QUEUE_NOT_FOUND -1
+/* invalidate queue operation in array */
+static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
+{
+ int i;
+
+ for (i = 0; i < num_queues; i++)
+ queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK;
+}
+
+/* find queue index in array */
+static int q_array_get_index(unsigned int queue_id,
+ uint32_t num_queues,
+ uint32_t *queue_ids)
+{
+ int i;
+
+ for (i = 0; i < num_queues; i++)
+ if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK))
+ return i;
+
+ return QUEUE_NOT_FOUND;
+}
+
+struct copy_context_work_handler_workarea {
+ struct work_struct copy_context_work;
+ struct kfd_process *p;
+};
+
+static void copy_context_work_handler (struct work_struct *work)
+{
+ struct copy_context_work_handler_workarea *workarea;
+ struct mqd_manager *mqd_mgr;
+ struct queue *q;
+ struct mm_struct *mm;
+ struct kfd_process *p;
+ uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size;
+ int i;
+
+ workarea = container_of(work,
+ struct copy_context_work_handler_workarea,
+ copy_context_work);
+
+ p = workarea->p;
+ mm = get_task_mm(p->lead_thread);
+
+ if (!mm)
+ return;
+
+ kthread_use_mm(mm);
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
+
+ /* We ignore the return value from get_wave_state
+ * because
+ * i) right now, it always returns 0, and
+ * ii) if we hit an error, we would continue to the
+ * next queue anyway.
+ */
+ mqd_mgr->get_wave_state(mqd_mgr,
+ q->mqd,
+ &q->properties,
+ (void __user *) q->properties.ctx_save_restore_area_address,
+ &tmp_ctl_stack_used_size,
+ &tmp_save_area_used_size);
+ }
+ }
+ kthread_unuse_mm(mm);
+ mmput(mm);
+}
+
+static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
+{
+ size_t array_size = num_queues * sizeof(uint32_t);
+
+ if (!usr_queue_id_array)
+ return NULL;
+
+ return memdup_user(usr_queue_id_array, array_size);
+}
+
+int resume_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t *usr_queue_id_array)
+{
+ uint32_t *queue_ids = NULL;
+ int total_resumed = 0;
+ int i;
+
+ if (usr_queue_id_array) {
+ queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
+
+ if (IS_ERR(queue_ids))
+ return PTR_ERR(queue_ids);
+
+ /* mask all queues as invalid. unmask per successful request */
+ q_array_invalidate(num_queues, queue_ids);
+ }
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct qcm_process_device *qpd = &pdd->qpd;
+ struct queue *q;
+ int r, per_device_resumed = 0;
+
+ dqm_lock(dqm);
+
+ /* unmask queues that resume or already resumed as valid */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = QUEUE_NOT_FOUND;
+
+ if (queue_ids)
+ q_idx = q_array_get_index(
+ q->properties.queue_id,
+ num_queues,
+ queue_ids);
+
+ if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
+ int err = resume_single_queue(dqm, &pdd->qpd, q);
+
+ if (queue_ids) {
+ if (!err) {
+ queue_ids[q_idx] &=
+ ~KFD_DBG_QUEUE_INVALID_MASK;
+ } else {
+ queue_ids[q_idx] |=
+ KFD_DBG_QUEUE_ERROR_MASK;
+ break;
+ }
+ }
+
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ wake_up_all(&dqm->destroy_wait);
+ if (!err)
+ total_resumed++;
+ } else {
+ per_device_resumed++;
+ }
+ }
+ }
+
+ if (!per_device_resumed) {
+ dqm_unlock(dqm);
+ continue;
+ }
+
+ r = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+ 0,
+ USE_DEFAULT_GRACE_PERIOD);
+ if (r) {
+ pr_err("Failed to resume process queues\n");
+ if (queue_ids) {
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = q_array_get_index(
+ q->properties.queue_id,
+ num_queues,
+ queue_ids);
+
+ /* mask queue as error on resume fail */
+ if (q_idx != QUEUE_NOT_FOUND)
+ queue_ids[q_idx] |=
+ KFD_DBG_QUEUE_ERROR_MASK;
+ }
+ }
+ } else {
+ wake_up_all(&dqm->destroy_wait);
+ total_resumed += per_device_resumed;
+ }
+
+ dqm_unlock(dqm);
+ }
+
+ if (queue_ids) {
+ if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
+ num_queues * sizeof(uint32_t)))
+ pr_err("copy_to_user failed on queue resume\n");
+
+ kfree(queue_ids);
+ }
+
+ return total_resumed;
+}
+
+int suspend_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t grace_period,
+ uint64_t exception_clear_mask,
+ uint32_t *usr_queue_id_array)
+{
+ uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
+ int total_suspended = 0;
+ int i;
+
+ if (IS_ERR(queue_ids))
+ return PTR_ERR(queue_ids);
+
+	/* mask all queues as invalid. unmask on successful request */
+ q_array_invalidate(num_queues, queue_ids);
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct device_queue_manager *dqm = pdd->dev->dqm;
+ struct qcm_process_device *qpd = &pdd->qpd;
+ struct queue *q;
+ int r, per_device_suspended = 0;
+
+ mutex_lock(&p->event_mutex);
+ dqm_lock(dqm);
+
+ /* unmask queues that suspend or already suspended */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = q_array_get_index(q->properties.queue_id,
+ num_queues,
+ queue_ids);
+
+ if (q_idx != QUEUE_NOT_FOUND) {
+ int err = suspend_single_queue(dqm, pdd, q);
+ bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;
+
+ if (!err) {
+ queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
+ if (exception_clear_mask && is_mes)
+ q->properties.exception_status &=
+ ~exception_clear_mask;
+
+ if (is_mes)
+ total_suspended++;
+ else
+ per_device_suspended++;
+ } else if (err != -EBUSY) {
+ r = err;
+ queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
+ break;
+ }
+ }
+ }
+
+ if (!per_device_suspended) {
+ dqm_unlock(dqm);
+ mutex_unlock(&p->event_mutex);
+ if (total_suspended)
+ amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
+ continue;
+ }
+
+ r = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+ grace_period);
+
+ if (r)
+ pr_err("Failed to suspend process queues.\n");
+ else
+ total_suspended += per_device_suspended;
+
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ int q_idx = q_array_get_index(q->properties.queue_id,
+ num_queues, queue_ids);
+
+ if (q_idx == QUEUE_NOT_FOUND)
+ continue;
+
+ /* mask queue as error on suspend fail */
+ if (r)
+ queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
+ else if (exception_clear_mask)
+ q->properties.exception_status &=
+ ~exception_clear_mask;
+ }
+
+ dqm_unlock(dqm);
+ mutex_unlock(&p->event_mutex);
+ amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
+ }
+
+ if (total_suspended) {
+ struct copy_context_work_handler_workarea copy_context_worker;
+
+ INIT_WORK_ONSTACK(
+ &copy_context_worker.copy_context_work,
+ copy_context_work_handler);
+
+ copy_context_worker.p = p;
+
+ schedule_work(&copy_context_worker.copy_context_work);
+
+
+ flush_work(&copy_context_worker.copy_context_work);
+ destroy_work_on_stack(&copy_context_worker.copy_context_work);
+ }
+
+ if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
+ num_queues * sizeof(uint32_t)))
+ pr_err("copy_to_user failed on queue suspend\n");
+
+ kfree(queue_ids);
+
+ return total_suspended;
+}
+
+static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
+{
+ switch (q_props->type) {
+ case KFD_QUEUE_TYPE_COMPUTE:
+ return q_props->format == KFD_QUEUE_FORMAT_PM4
+ ? KFD_IOC_QUEUE_TYPE_COMPUTE
+ : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
+ case KFD_QUEUE_TYPE_SDMA:
+ return KFD_IOC_QUEUE_TYPE_SDMA;
+ case KFD_QUEUE_TYPE_SDMA_XGMI:
+ return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
+ default:
+ WARN_ONCE(true, "queue type not recognized!");
+ return 0xffffffff;
+ };
+}
+
+void set_queue_snapshot_entry(struct queue *q,
+ uint64_t exception_clear_mask,
+ struct kfd_queue_snapshot_entry *qss_entry)
+{
+ qss_entry->ring_base_address = q->properties.queue_address;
+ qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
+ qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
+ qss_entry->ctx_save_restore_address =
+ q->properties.ctx_save_restore_area_address;
+ qss_entry->ctx_save_restore_area_size =
+ q->properties.ctx_save_restore_area_size;
+ qss_entry->exception_status = q->properties.exception_status;
+ qss_entry->queue_id = q->properties.queue_id;
+ qss_entry->gpu_id = q->device->id;
+ qss_entry->ring_size = (uint32_t)q->properties.queue_size;
+ qss_entry->queue_type = set_queue_type_for_user(&q->properties);
+ q->properties.exception_status &= ~exception_clear_mask;
+}
+
+int debug_lock_and_unmap(struct device_queue_manager *dqm)
+{
+ int r;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+ return 0;
+
+ dqm_lock(dqm);
+
+ r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
+ if (r)
+ dqm_unlock(dqm);
+
+ return r;
+}
+
+int debug_map_and_unlock(struct device_queue_manager *dqm)
+{
+ int r;
+
+ if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+ return -EINVAL;
+ }
+
+ if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+ return 0;
+
+ r = map_queues_cpsch(dqm);
+
+ dqm_unlock(dqm);
+
+ return r;
+}
+
+int debug_refresh_runlist(struct device_queue_manager *dqm)
+{
+ int r = debug_lock_and_unmap(dqm);
+
+ if (r)
+ return r;
+
+ return debug_map_and_unlock(dqm);
+}
+
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
@@ -2452,52 +3155,66 @@ static void seq_reg_dump(struct seq_file *m,
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
struct device_queue_manager *dqm = data;
+ uint32_t xcc_mask = dqm->dev->xcc_mask;
uint32_t (*dump)[2], n_regs;
int pipe, queue;
- int r = 0;
+ int r = 0, xcc_id;
+ uint32_t sdma_engine_start;
if (!dqm->sched_running) {
seq_puts(m, " Device is stopped\n");
return 0;
}
- r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
- KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
- &dump, &n_regs);
- if (!r) {
- seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
- KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
- KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
- KFD_CIK_HIQ_QUEUE);
- seq_reg_dump(m, dump, n_regs);
+ for_each_inst(xcc_id, xcc_mask) {
+ r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
+ KFD_CIK_HIQ_PIPE,
+ KFD_CIK_HIQ_QUEUE, &dump,
+ &n_regs, xcc_id);
+ if (!r) {
+ seq_printf(
+ m,
+ " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
+ xcc_id,
+ KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
+ KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
+ KFD_CIK_HIQ_QUEUE);
+ seq_reg_dump(m, dump, n_regs);
- kfree(dump);
- }
+ kfree(dump);
+ }
- for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
- int pipe_offset = pipe * get_queues_per_pipe(dqm);
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
- for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
- if (!test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.cp_queue_bitmap))
- continue;
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
+ if (!test_bit(pipe_offset + queue,
+ dqm->dev->kfd->shared_resources.cp_queue_bitmap))
+ continue;
- r = dqm->dev->kfd2kgd->hqd_dump(
- dqm->dev->adev, pipe, queue, &dump, &n_regs);
- if (r)
- break;
+ r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
+ pipe, queue,
+ &dump, &n_regs,
+ xcc_id);
+ if (r)
+ break;
- seq_printf(m, " CP Pipe %d, Queue %d\n",
- pipe, queue);
- seq_reg_dump(m, dump, n_regs);
+ seq_printf(m,
+ " Inst %d, CP Pipe %d, Queue %d\n",
+ xcc_id, pipe, queue);
+ seq_reg_dump(m, dump, n_regs);
- kfree(dump);
+ kfree(dump);
+ }
}
}
- for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
+ sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+ for (pipe = sdma_engine_start;
+ pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
+ pipe++) {
for (queue = 0;
- queue < dqm->dev->device_info.num_sdma_queues_per_engine;
+ queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->adev, pipe, queue, &dump, &n_regs);
@@ -2526,7 +3243,8 @@ int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
return r;
}
dqm->active_runlist = true;
- r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return r;
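The suspend/resume entry points added in this file are expected to be driven from the KFD debugger ioctl path; the sketch below is a hypothetical caller, not part of this patch, showing how the two calls pair up (the function name and the wave-state inspection step are assumptions).

	/* Hypothetical caller sketch: suspend a batch of user queues, inspect
	 * them, then resume.  Queue ids that failed are returned to user space
	 * flagged with KFD_DBG_QUEUE_ERROR_MASK in the id array.
	 */
	static int example_debug_suspend_resume(struct kfd_process *p,
						uint32_t num_queues,
						uint32_t __user *queue_ids,
						uint32_t grace_period)
	{
		int suspended, resumed;

		suspended = suspend_queues(p, num_queues, grace_period,
					   0 /* exception_clear_mask */,
					   (uint32_t *)queue_ids);
		if (suspended < 0)
			return suspended;

		/* ... read wave state from the CWSR save areas here ... */

		resumed = resume_queues(p, num_queues, (uint32_t *)queue_ids);
		return resumed < 0 ? resumed : 0;
	}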
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index a537b9ef3e16..cf7e182588f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -37,6 +37,7 @@
#define KFD_MES_PROCESS_QUANTUM 100000
#define KFD_MES_GANG_QUANTUM 10000
+#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
struct device_process_node {
struct qcm_process_device *qpd;
@@ -207,7 +208,7 @@ struct device_queue_manager_asic_ops {
struct queue *q,
struct qcm_process_device *qpd);
struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
};
/**
@@ -228,7 +229,7 @@ struct device_queue_manager {
struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packet_mgr;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
unsigned int saved_flags;
@@ -239,8 +240,8 @@ struct device_queue_manager {
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
- uint64_t sdma_bitmap;
- uint64_t xgmi_sdma_bitmap;
+ DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES);
+ DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
/* the pasid mapping for each kfd vmid */
uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
@@ -249,6 +250,7 @@ struct device_queue_manager {
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
+ uint32_t trap_debug_vmid;
/* hw exception */
bool is_hws_hang;
@@ -256,19 +258,22 @@ struct device_queue_manager {
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
bool sched_running;
+
+ /* used for GFX 9.4.3 only */
+ uint32_t current_logical_xcc_start;
+
+ uint32_t wait_times;
+
+ wait_queue_head_t destroy_wait;
};
void device_queue_manager_init_cik(
struct device_queue_manager_asic_ops *asic_ops);
-void device_queue_manager_init_cik_hawaii(
- struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
struct device_queue_manager_asic_ops *asic_ops);
-void device_queue_manager_init_vi_tonga(
- struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
struct device_queue_manager_asic_ops *asic_ops);
-void device_queue_manager_init_v10_navi10(
+void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v11(
struct device_queue_manager_asic_ops *asic_ops);
@@ -279,6 +284,24 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
+int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+int release_debug_trap_vmid(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+int suspend_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t grace_period,
+ uint64_t exception_clear_mask,
+ uint32_t *usr_queue_id_array);
+int resume_queues(struct kfd_process *p,
+ uint32_t num_queues,
+ uint32_t *usr_queue_id_array);
+void set_queue_snapshot_entry(struct queue *q,
+ uint64_t exception_clear_mask,
+ struct kfd_queue_snapshot_entry *qss_entry);
+int debug_lock_and_unmap(struct device_queue_manager *dqm);
+int debug_map_and_unlock(struct device_queue_manager *dqm);
+int debug_refresh_runlist(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index b1ab5b0775e1..d4d95c7f2e5d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -34,17 +34,13 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd);
-static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd);
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd);
-static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
- struct queue *q,
- struct qcm_process_device *qpd);
+ struct qcm_process_device *qpd);
+static void init_sdma_vm(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
void device_queue_manager_init_cik(
- struct device_queue_manager_asic_ops *asic_ops)
+ struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
asic_ops->update_qpd = update_qpd_cik;
@@ -52,15 +48,6 @@ void device_queue_manager_init_cik(
asic_ops->mqd_manager_init = mqd_manager_init_cik;
}
-void device_queue_manager_init_cik_hawaii(
- struct device_queue_manager_asic_ops *asic_ops)
-{
- asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
- asic_ops->update_qpd = update_qpd_cik_hawaii;
- asic_ops->init_sdma_vm = init_sdma_vm_hawaii;
- asic_ops->mqd_manager_init = mqd_manager_init_cik_hawaii;
-}
-
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
@@ -115,41 +102,7 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
}
static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
- DEFAULT_MTYPE(MTYPE_NONCACHED) |
- APE1_MTYPE(MTYPE_NONCACHED);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- if (qpd->pqm->process->is_32bit_user_mode) {
- temp = get_sh_mem_bases_32(pdd);
- qpd->sh_mem_bases = SHARED_BASE(temp);
- qpd->sh_mem_config |= PTR32;
- } else {
- temp = get_sh_mem_bases_nybble_64(pdd);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
- qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
- }
-
- pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
- qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
-
- return 0;
-}
-
-static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+ struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
unsigned int temp;
@@ -178,25 +131,9 @@ static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
return 0;
}
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd)
-{
- uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
-
- if (q->process->is_32bit_user_mode)
- value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
- get_sh_mem_bases_32(qpd_to_pdd(qpd));
- else
- value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
- SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
- SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
-
- q->properties.sdma_vm_addr = value;
-}
-
-static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
- struct queue *q,
- struct qcm_process_device *qpd)
+static void init_sdma_vm(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
{
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index f1a1f5753e65..245a90dfc2f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -32,7 +32,7 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
-void device_queue_manager_init_v10_navi10(
+void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->update_qpd = update_qpd_v10;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 8b2dd2670ab7..54eb1bff903c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -24,9 +24,7 @@
#include "kfd_device_queue_manager.h"
#include "vega10_enum.h"
-#include "gc/gc_9_0_offset.h"
-#include "gc/gc_9_0_sh_mask.h"
-#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "gc/gc_9_4_3_sh_mask.h"
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
@@ -62,9 +60,13 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)
+ if (dqm->dev->kfd->noretry)
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3))
+ qpd->sh_mem_config |=
+ (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
+
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
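
For reference, the hunk above composes SH_MEM_CONFIG by OR-ing field values into their shifted positions: unaligned mode always, RETRY_DISABLE when noretry is set, and F8 mode only on GC 9.4.3. A minimal sketch of that composition, assuming the SH_MEM_CONFIG__*__SHIFT macros from the gc_9_4_3_sh_mask.h header included above (illustrative only, not part of the patch):

static uint32_t example_sh_mem_config_v9(bool noretry, bool is_gc_9_4_3)
{
	uint32_t cfg = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
		       SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	if (noretry)		/* disable XNACK retry on translation faults */
		cfg |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;

	if (is_gc_9_4_3)	/* GC 9.4.3 additionally enables F8 mode */
		cfg |= 1 << SH_MEM_CONFIG__F8_MODE__SHIFT;

	return cfg;
}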
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index d7d45832df0f..b291ee0fab94 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -28,29 +28,19 @@
#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd,
- enum cache_policy default_policy,
- enum cache_policy alternate_policy,
- void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
-static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd,
- enum cache_policy default_policy,
- enum cache_policy alternate_policy,
- void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd);
-static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd);
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd);
-static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
- struct queue *q,
- struct qcm_process_device *qpd);
+ struct qcm_process_device *qpd);
+static void init_sdma_vm(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd);
void device_queue_manager_init_vi(
- struct device_queue_manager_asic_ops *asic_ops)
+ struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
asic_ops->update_qpd = update_qpd_vi;
@@ -58,15 +48,6 @@ void device_queue_manager_init_vi(
asic_ops->mqd_manager_init = mqd_manager_init_vi;
}
-void device_queue_manager_init_vi_tonga(
- struct device_queue_manager_asic_ops *asic_ops)
-{
- asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
- asic_ops->update_qpd = update_qpd_vi_tonga;
- asic_ops->init_sdma_vm = init_sdma_vm_tonga;
- asic_ops->mqd_manager_init = mqd_manager_init_vi_tonga;
-}
-
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
@@ -96,35 +77,6 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
}
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd,
- enum cache_policy default_policy,
- enum cache_policy alternate_policy,
- void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size)
-{
- uint32_t default_mtype;
- uint32_t ape1_mtype;
-
- default_mtype = (default_policy == cache_policy_coherent) ?
- MTYPE_CC :
- MTYPE_NC;
-
- ape1_mtype = (alternate_policy == cache_policy_coherent) ?
- MTYPE_CC :
- MTYPE_NC;
-
- qpd->sh_mem_config = (qpd->sh_mem_config &
- SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
- default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
- ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
- SH_MEM_CONFIG__PRIVATE_ATC_MASK;
-
- return true;
-}
-
-static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
@@ -152,48 +104,7 @@ static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
}
static int update_qpd_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
- MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
- MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
- SH_MEM_CONFIG__PRIVATE_ATC_MASK;
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- if (qpd->pqm->process->is_32bit_user_mode) {
- temp = get_sh_mem_bases_32(pdd);
- qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
- qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
- SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
- } else {
- temp = get_sh_mem_bases_nybble_64(pdd);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
- qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
- SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
- qpd->sh_mem_config |= 1 <<
- SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
- }
-
- pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
- qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
-
- return 0;
-}
-
-static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+ struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
unsigned int temp;
@@ -226,25 +137,9 @@ static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
return 0;
}
-static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd)
-{
- uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
-
- if (q->process->is_32bit_user_mode)
- value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
- get_sh_mem_bases_32(qpd_to_pdd(qpd));
- else
- value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
- SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
- SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
-
- q->properties.sdma_vm_addr = value;
-}
-
-static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
- struct queue *q,
- struct qcm_process_device *qpd)
+static void init_sdma_vm(struct device_queue_manager *dqm,
+ struct queue *q,
+ struct qcm_process_device *qpd)
{
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 38c9e1ca6691..c2e0b79dcc6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -61,84 +61,49 @@ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)
/* Doorbell calculations for device init. */
int kfd_doorbell_init(struct kfd_dev *kfd)
{
- size_t doorbell_start_offset;
- size_t doorbell_aperture_size;
- size_t doorbell_process_limit;
+ int size = PAGE_SIZE;
+ int r;
/*
- * With MES enabled, just set the doorbell base as it is needed
- * to calculate doorbell physical address.
- */
- if (kfd->shared_resources.enable_mes) {
- kfd->doorbell_base =
- kfd->shared_resources.doorbell_physical_address;
- return 0;
- }
-
- /*
- * We start with calculations in bytes because the input data might
- * only be byte-aligned.
- * Only after we have done the rounding can we assume any alignment.
+ * Todo: KFD kernel level operations need only one doorbell for
+ * ring test/HWS. So instead of reserving a whole page here for
+ * kernel, reserve and consume a doorbell from existing KGD kernel
+ * doorbell page.
*/
- doorbell_start_offset =
- roundup(kfd->shared_resources.doorbell_start_offset,
- kfd_doorbell_process_slice(kfd));
-
- doorbell_aperture_size =
- rounddown(kfd->shared_resources.doorbell_aperture_size,
- kfd_doorbell_process_slice(kfd));
-
- if (doorbell_aperture_size > doorbell_start_offset)
- doorbell_process_limit =
- (doorbell_aperture_size - doorbell_start_offset) /
- kfd_doorbell_process_slice(kfd);
- else
- return -ENOSPC;
-
- if (!kfd->max_doorbell_slices ||
- doorbell_process_limit < kfd->max_doorbell_slices)
- kfd->max_doorbell_slices = doorbell_process_limit;
-
- kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
- doorbell_start_offset;
-
- kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
-
- kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
- kfd_doorbell_process_slice(kfd));
-
- if (!kfd->doorbell_kernel_ptr)
+ /* Bitmap to dynamically allocate doorbells from kernel page */
+ kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL);
+ if (!kfd->doorbell_bitmap) {
+ DRM_ERROR("Failed to allocate kernel doorbell bitmap\n");
return -ENOMEM;
+ }
- pr_debug("Doorbell initialization:\n");
- pr_debug("doorbell base == 0x%08lX\n",
- (uintptr_t)kfd->doorbell_base);
-
- pr_debug("doorbell_base_dw_offset == 0x%08lX\n",
- kfd->doorbell_base_dw_offset);
-
- pr_debug("doorbell_process_limit == 0x%08lX\n",
- doorbell_process_limit);
-
- pr_debug("doorbell_kernel_offset == 0x%08lX\n",
- (uintptr_t)kfd->doorbell_base);
-
- pr_debug("doorbell aperture size == 0x%08lX\n",
- kfd->shared_resources.doorbell_aperture_size);
-
- pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
+ /* Alloc a doorbell page for KFD kernel usages */
+ r = amdgpu_bo_create_kernel(kfd->adev,
+ size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_DOORBELL,
+ &kfd->doorbells,
+ NULL,
+ (void **)&kfd->doorbell_kernel_ptr);
+ if (r) {
+ pr_err("failed to allocate kernel doorbells\n");
+ bitmap_free(kfd->doorbell_bitmap);
+ return r;
+ }
+ pr_debug("Doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
return 0;
}
void kfd_doorbell_fini(struct kfd_dev *kfd)
{
- if (kfd->doorbell_kernel_ptr)
- iounmap(kfd->doorbell_kernel_ptr);
+ bitmap_free(kfd->doorbell_bitmap);
+ amdgpu_bo_free_kernel(&kfd->doorbells, NULL,
+ (void **)&kfd->doorbell_kernel_ptr);
}
-int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
@@ -148,7 +113,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
* For simplicity we only allow mapping of the entire doorbell
* allocation of a single device & process.
*/
- if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
+ if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd))
return -EINVAL;
pdd = kfd_get_process_device_data(dev, process);
@@ -170,13 +135,13 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
(unsigned long long) vma->vm_start, address, vma->vm_flags,
- kfd_doorbell_process_slice(dev));
+ kfd_doorbell_process_slice(dev->kfd));
return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
- kfd_doorbell_process_slice(dev),
+ kfd_doorbell_process_slice(dev->kfd),
vma->vm_page_prot);
}
@@ -188,22 +153,15 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
u32 inx;
mutex_lock(&kfd->doorbell_mutex);
- inx = find_first_zero_bit(kfd->doorbell_available_index,
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+ inx = find_first_zero_bit(kfd->doorbell_bitmap, PAGE_SIZE / sizeof(u32));
- __set_bit(inx, kfd->doorbell_available_index);
+ __set_bit(inx, kfd->doorbell_bitmap);
mutex_unlock(&kfd->doorbell_mutex);
if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
return NULL;
- inx *= kfd->device_info.doorbell_size / sizeof(u32);
-
- /*
- * Calculating the kernel doorbell offset using the first
- * doorbell page.
- */
- *doorbell_off = kfd->doorbell_base_dw_offset + inx;
+ *doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -217,11 +175,10 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
- * sizeof(u32) / kfd->device_info.doorbell_size;
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
mutex_lock(&kfd->doorbell_mutex);
- __clear_bit(inx, kfd->doorbell_available_index);
+ __clear_bit(inx, kfd->doorbell_bitmap);
mutex_unlock(&kfd->doorbell_mutex);
}
@@ -243,80 +200,96 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
}
-unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
- struct kfd_process_device *pdd,
- unsigned int doorbell_id)
+static int init_doorbell_bitmap(struct qcm_process_device *qpd,
+ struct kfd_dev *dev)
{
- /*
- * doorbell_base_dw_offset accounts for doorbells taken by KGD.
- * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
- * the process's doorbells. The offset returned is in dword
- * units regardless of the ASIC-dependent doorbell size.
- */
- if (!kfd->shared_resources.enable_mes)
- return kfd->doorbell_base_dw_offset +
- pdd->doorbell_index
- * kfd_doorbell_process_slice(kfd) / sizeof(u32) +
- doorbell_id *
- kfd->device_info.doorbell_size / sizeof(u32);
- else
- return amdgpu_mes_get_doorbell_dw_offset_in_bar(
- (struct amdgpu_device *)kfd->adev,
- pdd->doorbell_index, doorbell_id);
-}
+ unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
-uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
-{
- uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size -
- kfd->shared_resources.doorbell_start_offset) /
- kfd_doorbell_process_slice(kfd) + 1;
+ if (!KFD_IS_SOC15(dev))
+ return 0;
- return num_of_elems;
+ /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
+ for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
+ if (i >= range_start && i <= range_end) {
+ __set_bit(i, qpd->doorbell_bitmap);
+ __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ qpd->doorbell_bitmap);
+ }
+ }
+ return 0;
}
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
- if (!pdd->doorbell_index) {
- int r = kfd_alloc_process_doorbells(pdd->dev,
- &pdd->doorbell_index);
- if (r < 0)
+ struct amdgpu_device *adev = pdd->dev->adev;
+ uint32_t first_db_index;
+
+ if (!pdd->qpd.proc_doorbells) {
+ if (kfd_alloc_process_doorbells(pdd->dev->kfd, pdd))
+ /* a phys_addr_t of 0 indicates an error */
return 0;
}
- return pdd->dev->doorbell_base +
- pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
+ first_db_index = amdgpu_doorbell_index_on_bar(adev, pdd->qpd.proc_doorbells, 0);
+ return adev->doorbell.base + first_db_index * sizeof(uint32_t);
}
-int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index)
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
{
- int r = 0;
-
- if (!kfd->shared_resources.enable_mes)
- r = ida_simple_get(&kfd->doorbell_ida, 1,
- kfd->max_doorbell_slices, GFP_KERNEL);
- else
- r = amdgpu_mes_alloc_process_doorbells(
- (struct amdgpu_device *)kfd->adev,
- doorbell_index);
+ int r;
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ /* Allocate bitmap for dynamic doorbell allocation */
+ qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ GFP_KERNEL);
+ if (!qpd->doorbell_bitmap) {
+ DRM_ERROR("Failed to allocate process doorbell bitmap\n");
+ return -ENOMEM;
+ }
- if (r > 0)
- *doorbell_index = r;
+ r = init_doorbell_bitmap(&pdd->qpd, kfd);
+ if (r) {
+ DRM_ERROR("Failed to initialize process doorbells\n");
+ r = -ENOMEM;
+ goto err;
+ }
- if (r < 0)
- pr_err("Failed to allocate process doorbells\n");
+ /* Allocate doorbells for this process */
+ r = amdgpu_bo_create_kernel(kfd->adev,
+ kfd_doorbell_process_slice(kfd),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_DOORBELL,
+ &qpd->proc_doorbells,
+ NULL,
+ NULL);
+ if (r) {
+ DRM_ERROR("Failed to allocate process doorbells\n");
+ goto err;
+ }
+ return 0;
+err:
+ bitmap_free(qpd->doorbell_bitmap);
+ qpd->doorbell_bitmap = NULL;
return r;
}
-void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index)
+void kfd_free_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd)
{
- if (doorbell_index) {
- if (!kfd->shared_resources.enable_mes)
- ida_simple_remove(&kfd->doorbell_ida, doorbell_index);
- else
- amdgpu_mes_free_process_doorbells(
- (struct amdgpu_device *)kfd->adev,
- doorbell_index);
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ if (qpd->doorbell_bitmap) {
+ bitmap_free(qpd->doorbell_bitmap);
+ qpd->doorbell_bitmap = NULL;
}
+
+ amdgpu_bo_free_kernel(&qpd->proc_doorbells, NULL, NULL);
}
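
The doorbell rework above replaces the static aperture carve-up with a bitmap of 32-bit doorbell slots backed by a buffer object in the AMDGPU_GEM_DOMAIN_DOORBELL domain. The slot handling reduces to the usual bitmap-allocator pattern under a mutex; a minimal sketch with hypothetical helper names, kernel-style types assumed (not part of the patch):

static int example_doorbell_slot_get(unsigned long *bitmap, struct mutex *lock,
				     unsigned int num_slots)
{
	unsigned int slot;

	mutex_lock(lock);
	slot = find_first_zero_bit(bitmap, num_slots);
	if (slot < num_slots)
		__set_bit(slot, bitmap);	/* claim one 32-bit doorbell */
	mutex_unlock(lock);

	return slot < num_slots ? (int)slot : -ENOSPC;
}

static void example_doorbell_slot_put(unsigned long *bitmap, struct mutex *lock,
				      unsigned int slot)
{
	mutex_lock(lock);
	__clear_bit(slot, bitmap);		/* return the slot to the pool */
	mutex_unlock(lock);
}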
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index c894cf8f7c50..0f58be65132f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -31,7 +31,6 @@
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
-#include "kfd_iommu.h"
#include <linux/device.h>
/*
@@ -41,6 +40,7 @@ struct kfd_event_waiter {
wait_queue_entry_t wait;
struct kfd_event *event; /* Event to wait for */
bool activated; /* Becomes true when event is signaled */
+ bool event_age_enabled; /* set to true when last_event_age is non-zero */
};
/*
@@ -348,7 +348,7 @@ static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
{
- struct kfd_dev *kfd;
+ struct kfd_node *kfd;
struct kfd_process_device *pdd;
void *mem, *kern_addr;
uint64_t size;
@@ -431,6 +431,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
if (!ret) {
*event_id = ev->event_id;
*event_trigger_data = ev->event_id;
+ ev->event_age = 1;
} else {
kfree(ev);
}
@@ -629,6 +630,11 @@ static void set_event(struct kfd_event *ev)
* updating the wait queues in kfd_wait_on_events.
*/
ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
+ if (!(++ev->event_age)) {
+ /* Never wrap back to reserved/default event age 0/1 */
+ ev->event_age = 2;
+ WARN_ONCE(1, "event_age wrap back!");
+ }
list_for_each_entry(waiter, &ev->wq.head, wait.entry)
WRITE_ONCE(waiter->activated, true);
@@ -791,9 +797,9 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
static int init_event_waiter(struct kfd_process *p,
struct kfd_event_waiter *waiter,
- uint32_t event_id)
+ struct kfd_event_data *event_data)
{
- struct kfd_event *ev = lookup_event_by_id(p, event_id);
+ struct kfd_event *ev = lookup_event_by_id(p, event_data->event_id);
if (!ev)
return -EINVAL;
@@ -802,6 +808,15 @@ static int init_event_waiter(struct kfd_process *p,
waiter->event = ev;
waiter->activated = ev->signaled;
ev->signaled = ev->signaled && !ev->auto_reset;
+
+ /* last_event_age = 0 is reserved for backward compatibility */
+ if (waiter->event->type == KFD_EVENT_TYPE_SIGNAL &&
+ event_data->signal_event_data.last_event_age) {
+ waiter->event_age_enabled = true;
+ if (ev->event_age != event_data->signal_event_data.last_event_age)
+ waiter->activated = true;
+ }
+
if (!waiter->activated)
add_wait_queue(&ev->wq, &waiter->wait);
spin_unlock(&ev->lock);
@@ -849,22 +864,29 @@ static int copy_signaled_event_data(uint32_t num_events,
struct kfd_event_waiter *event_waiters,
struct kfd_event_data __user *data)
{
- struct kfd_hsa_memory_exception_data *src;
- struct kfd_hsa_memory_exception_data __user *dst;
+ void *src;
+ void __user *dst;
struct kfd_event_waiter *waiter;
struct kfd_event *event;
- uint32_t i;
+ uint32_t i, size = 0;
for (i = 0; i < num_events; i++) {
waiter = &event_waiters[i];
event = waiter->event;
if (!event)
return -EINVAL; /* event was destroyed */
- if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
- dst = &data[i].memory_exception_data;
- src = &event->memory_exception_data;
- if (copy_to_user(dst, src,
- sizeof(struct kfd_hsa_memory_exception_data)))
+ if (waiter->activated) {
+ if (event->type == KFD_EVENT_TYPE_MEMORY) {
+ dst = &data[i].memory_exception_data;
+ src = &event->memory_exception_data;
+ size = sizeof(struct kfd_hsa_memory_exception_data);
+ } else if (event->type == KFD_EVENT_TYPE_SIGNAL &&
+ waiter->event_age_enabled) {
+ dst = &data[i].signal_event_data.last_event_age;
+ src = &event->event_age;
+ size = sizeof(u64);
+ }
+ if (size && copy_to_user(dst, src, size))
return -EFAULT;
}
}
@@ -942,8 +964,7 @@ int kfd_wait_on_events(struct kfd_process *p,
goto out_unlock;
}
- ret = init_event_waiter(p, &event_waiters[i],
- event_data.event_id);
+ ret = init_event_waiter(p, &event_waiters[i], &event_data);
if (ret)
goto out_unlock;
}
@@ -1124,87 +1145,6 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
rcu_read_unlock();
}
-#ifdef KFD_SUPPORT_IOMMU_V2
-void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
- unsigned long address, bool is_write_requested,
- bool is_execute_requested)
-{
- struct kfd_hsa_memory_exception_data memory_exception_data;
- struct vm_area_struct *vma;
- int user_gpu_id;
-
- /*
- * Because we are called from arbitrary context (workqueue) as opposed
- * to process context, kfd_process could attempt to exit while we are
- * running so the lookup function increments the process ref count.
- */
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
- struct mm_struct *mm;
-
- if (!p)
- return; /* Presumably process exited. */
-
- /* Take a safe reference to the mm_struct, which may otherwise
- * disappear even while the kfd_process is still referenced.
- */
- mm = get_task_mm(p->lead_thread);
- if (!mm) {
- kfd_unref_process(p);
- return; /* Process is exiting */
- }
-
- user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
- if (unlikely(user_gpu_id == -EINVAL)) {
- WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
- return;
- }
- memset(&memory_exception_data, 0, sizeof(memory_exception_data));
-
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
-
- memory_exception_data.gpu_id = user_gpu_id;
- memory_exception_data.va = address;
- /* Set failure reason */
- memory_exception_data.failure.NotPresent = 1;
- memory_exception_data.failure.NoExecute = 0;
- memory_exception_data.failure.ReadOnly = 0;
- if (vma && address >= vma->vm_start) {
- memory_exception_data.failure.NotPresent = 0;
-
- if (is_write_requested && !(vma->vm_flags & VM_WRITE))
- memory_exception_data.failure.ReadOnly = 1;
- else
- memory_exception_data.failure.ReadOnly = 0;
-
- if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
- memory_exception_data.failure.NoExecute = 1;
- else
- memory_exception_data.failure.NoExecute = 0;
- }
-
- mmap_read_unlock(mm);
- mmput(mm);
-
- pr_debug("notpresent %d, noexecute %d, readonly %d\n",
- memory_exception_data.failure.NotPresent,
- memory_exception_data.failure.NoExecute,
- memory_exception_data.failure.ReadOnly);
-
- /* Workaround on Raven to not kill the process when memory is freed
- * before IOMMU is able to finish processing all the excessive PPRs
- */
-
- if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0))
- lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
- &memory_exception_data);
-
- kfd_unref_process(p);
-}
-#endif /* KFD_SUPPORT_IOMMU_V2 */
-
void kfd_signal_hw_exception_event(u32 pasid)
{
/*
@@ -1221,8 +1161,9 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
-void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
- struct kfd_vm_fault_info *info)
+void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+ struct kfd_vm_fault_info *info,
+ struct kfd_hsa_memory_exception_data *data)
{
struct kfd_event *ev;
uint32_t id;
@@ -1239,19 +1180,24 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
return;
}
- memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- memory_exception_data.gpu_id = user_gpu_id;
- memory_exception_data.failure.imprecise = true;
- /* Set failure reason */
- if (info) {
- memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
- memory_exception_data.failure.NotPresent =
- info->prot_valid ? 1 : 0;
- memory_exception_data.failure.NoExecute =
- info->prot_exec ? 1 : 0;
- memory_exception_data.failure.ReadOnly =
- info->prot_write ? 1 : 0;
- memory_exception_data.failure.imprecise = 0;
+ /* SOC15 chips and onward pass in the exception data directly. */
+ if (!data) {
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.gpu_id = user_gpu_id;
+ memory_exception_data.failure.imprecise = true;
+
+ /* Set failure reason */
+ if (info) {
+ memory_exception_data.va = (info->page_addr) <<
+ PAGE_SHIFT;
+ memory_exception_data.failure.NotPresent =
+ info->prot_valid ? 1 : 0;
+ memory_exception_data.failure.NoExecute =
+ info->prot_exec ? 1 : 0;
+ memory_exception_data.failure.ReadOnly =
+ info->prot_write ? 1 : 0;
+ memory_exception_data.failure.imprecise = 0;
+ }
}
rcu_read_lock();
@@ -1260,7 +1206,8 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == KFD_EVENT_TYPE_MEMORY) {
spin_lock(&ev->lock);
- ev->memory_exception_data = memory_exception_data;
+ ev->memory_exception_data = data ? *data :
+ memory_exception_data;
set_event(ev);
spin_unlock(&ev->lock);
}
@@ -1269,7 +1216,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
kfd_unref_process(p);
}
-void kfd_signal_reset_event(struct kfd_dev *dev)
+void kfd_signal_reset_event(struct kfd_node *dev)
{
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_hsa_memory_exception_data memory_exception_data;
@@ -1325,7 +1272,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
srcu_read_unlock(&kfd_processes_srcu, idx);
}
-void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
+void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct kfd_hsa_memory_exception_data memory_exception_data;
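
The event_age changes above give user space a way to detect signals delivered between waits: set_event() bumps the age on every signal (skipping the reserved values 0 and 1 on wrap), and a waiter that supplies a non-zero last_event_age is activated immediately when the current age differs. A stripped-down sketch of that handshake with hypothetical names (illustrative only, not part of the patch):

struct example_event {
	u64 age;	/* bumped on every signal; 0 = disabled, 1 = initial */
	bool signaled;
};

static void example_signal(struct example_event *ev)
{
	ev->signaled = true;
	if (!++ev->age)		/* never wrap back to the reserved ages 0/1 */
		ev->age = 2;
}

static bool example_should_wake(struct example_event *ev, u64 last_age_seen)
{
	/* A non-zero last_event_age that no longer matches means a signal
	 * was delivered since the caller last looked.
	 */
	if (last_age_seen && ev->age != last_age_seen)
		return true;
	return ev->signaled;
}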
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 1c62c8dd6460..52ccfd397c2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -53,6 +53,7 @@ struct signal_page;
struct kfd_event {
u32 event_id;
+ u64 event_age;
bool signaled;
bool auto_reset;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 8aebe408c544..62b205dac63a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -322,22 +322,19 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- if (!pdd->dev->use_iommu_v2) {
- /* dGPUs: SVM aperture starting at 0
- * with small reserved space for kernel.
- * Set them to CANONICAL addresses.
- */
- pdd->gpuvm_base = SVM_USER_BASE;
- pdd->gpuvm_limit =
- pdd->dev->shared_resources.gpuvm_size - 1;
- } else {
- /* set them to non CANONICAL addresses, and no SVM is
- * allocated.
- */
- pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1);
- pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base,
- pdd->dev->shared_resources.gpuvm_size);
- }
+ /* dGPUs: SVM aperture starting at 0
+ * with small reserved space for kernel.
+ * Set them to CANONICAL addresses.
+ */
+ pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_limit =
+ pdd->dev->kfd->shared_resources.gpuvm_size - 1;
+
+ /* dGPUs: the reserved space for kernel
+ * before SVM
+ */
+ pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+ pdd->qpd.ib_base = SVM_IB_BASE;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
@@ -348,24 +345,24 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- /* Raven needs SVM to support graphic handle, etc. Leave the small
- * reserved space before SVM on Raven as well, even though we don't
- * have to.
- * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
- * are used in Thunk to reserve SVM.
- */
- pdd->gpuvm_base = SVM_USER_BASE;
+ pdd->gpuvm_base = PAGE_SIZE;
pdd->gpuvm_limit =
- pdd->dev->shared_resources.gpuvm_size - 1;
+ pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
+
+ /*
+ * Place TBA/TMA on opposite side of VM hole to prevent
+ * stray faults from triggering SVM on these pages.
+ */
+ pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
}
int kfd_init_apertures(struct kfd_process *process)
{
uint8_t id = 0;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct kfd_process_device *pdd;
/*Iterating over all devices*/
@@ -416,14 +413,6 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
}
-
- if (!dev->use_iommu_v2) {
- /* dGPUs: the reserved space for kernel
- * before SVM
- */
- pdd->qpd.cwsr_base = SVM_CWSR_BASE;
- pdd->qpd.ib_base = SVM_IB_BASE;
- }
}
dev_dbg(kfd_device, "node id %u\n", id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
new file mode 100644
index 000000000000..c7991e07b6be
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kfd_events.h"
+#include "kfd_debug.h"
+#include "soc15_int.h"
+#include "kfd_device_queue_manager.h"
+
+/*
+ * GFX10 SQ Interrupts
+ *
+ * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
+ * packet to the Interrupt Handler:
+ * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
+ * Wave - Generated by S_SENDMSG through a shader program
+ * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
+ *
+ * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
+ * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
+ *
+ * - context_id1[7:6]
+ * Encoding type (0 = Auto, 1 = Wave, 2 = Error)
+ *
+ * - context_id0[24]
+ * PRIV bit indicates that Wave S_SEND or error occurred within trap
+ *
+ * - context_id0[22:0]
+ * 23-bit data with the following layout per encoding type:
+ * Auto - only context_id0[8:0] is used, which reports various interrupts
+ * generated by SQG. The rest is 0.
+ * Wave - user data sent from m0 via S_SENDMSG
+ * Error - Error type (context_id0[22:19]), Error Details (rest of bits)
+ *
+ * The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave
+ * S_SENDMSG and Errors. These are 0 for Auto.
+ */
+
+enum SQ_INTERRUPT_WORD_ENCODING {
+ SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
+ SQ_INTERRUPT_WORD_ENCODING_INST,
+ SQ_INTERRUPT_WORD_ENCODING_ERROR,
+};
+
+enum SQ_INTERRUPT_ERROR_TYPE {
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
+ SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
+ SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
+};
+
+/* SQ_INTERRUPT_WORD_AUTO_CTXID */
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL__SHIFT 2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL__SHIFT 3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID__SHIFT 4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6
+
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL_MASK 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL_MASK 0x00000008
+#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000080
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID_MASK 0x030
+#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x0c0
+
+/* SQ_INTERRUPT_WORD_WAVE_CTXID */
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID__SHIFT 23
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 24
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 25
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID__SHIFT 30
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID__SHIFT 4
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6
+
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x000007fffff
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID_MASK 0x0000800000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x00001000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0x0003e000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID_MASK 0x000c0000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x00f
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID_MASK 0x030
+#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x0c0
+
+#define KFD_CTXID0__ERR_TYPE_MASK 0x780000
+#define KFD_CTXID0__ERR_TYPE__SHIFT 19
+
+/* GFX10 SQ interrupt ENC type bit (context_id1[7:6]) for wave s_sendmsg */
+#define KFD_CONTEXT_ID1_ENC_TYPE_WAVE_MASK 0x40
+/* GFX10 SQ interrupt PRIV bit (context_id0[24]) for s_sendmsg inside trap */
+#define KFD_CONTEXT_ID0_PRIV_MASK 0x1000000
+/*
+ * The debugger will send user data(m0) with PRIV=1 to indicate it requires
+ * notification from the KFD with the following queue id (DOORBELL_ID) and
+ * trap code (TRAP_CODE).
+ */
+#define KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK 0x0003ff
+#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT 10
+#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK 0x07fc00
+#define KFD_DEBUG_DOORBELL_ID(ctxid0) ((ctxid0) & \
+ KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK)
+#define KFD_DEBUG_TRAP_CODE(ctxid0) (((ctxid0) & \
+ KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK) \
+ >> KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT)
+#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
+#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
+#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
+ KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
+ >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
+
+static void event_interrupt_poison_consumption(struct kfd_node *dev,
+ uint16_t pasid, uint16_t client_id)
+{
+ int old_poison, ret = -EINVAL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return;
+
+ /* all queues of a process will be unmapped at once */
+ old_poison = atomic_cmpxchg(&p->poison, 0, 1);
+ kfd_unref_process(p);
+ if (old_poison)
+ return;
+
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SE0SH:
+ case SOC15_IH_CLIENTID_SE1SH:
+ case SOC15_IH_CLIENTID_SE2SH:
+ case SOC15_IH_CLIENTID_SE3SH:
+ case SOC15_IH_CLIENTID_UTCL2:
+ ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+ break;
+ case SOC15_IH_CLIENTID_SDMA0:
+ case SOC15_IH_CLIENTID_SDMA1:
+ case SOC15_IH_CLIENTID_SDMA2:
+ case SOC15_IH_CLIENTID_SDMA3:
+ case SOC15_IH_CLIENTID_SDMA4:
+ break;
+ default:
+ break;
+ }
+
+ kfd_signal_poison_consumed_event(dev, pasid);
+
+ /* If resetting the queue succeeds, do page retirement without a gpu
+ * reset; if it fails, fall back to the gpu reset path.
+ */
+ if (!ret) {
+ dev_warn(dev->adev->dev,
+ "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
+ client_id);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+ } else {
+ dev_warn(dev->adev->dev,
+ "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
+ client_id);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ }
+}
+
+static bool event_interrupt_isr_v10(struct kfd_node *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+{
+ uint16_t source_id, client_id, pasid, vmid;
+ const uint32_t *data = ih_ring_entry;
+
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+ /* Only handle interrupts from KFD VMIDs */
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+ if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
+ (vmid < dev->vm_info.first_vmid_kfd ||
+ vmid > dev->vm_info.last_vmid_kfd))
+ return false;
+
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+
+ /* Only handle clients we care about */
+ if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
+ client_id != SOC15_IH_CLIENTID_SDMA0 &&
+ client_id != SOC15_IH_CLIENTID_SDMA1 &&
+ client_id != SOC15_IH_CLIENTID_SDMA2 &&
+ client_id != SOC15_IH_CLIENTID_SDMA3 &&
+ client_id != SOC15_IH_CLIENTID_SDMA4 &&
+ client_id != SOC15_IH_CLIENTID_SDMA5 &&
+ client_id != SOC15_IH_CLIENTID_SDMA6 &&
+ client_id != SOC15_IH_CLIENTID_SDMA7 &&
+ client_id != SOC15_IH_CLIENTID_VMC &&
+ client_id != SOC15_IH_CLIENTID_VMC1 &&
+ client_id != SOC15_IH_CLIENTID_UTCL2 &&
+ client_id != SOC15_IH_CLIENTID_SE0SH &&
+ client_id != SOC15_IH_CLIENTID_SE1SH &&
+ client_id != SOC15_IH_CLIENTID_SE2SH &&
+ client_id != SOC15_IH_CLIENTID_SE3SH)
+ return false;
+
+ pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3],
+ data[4], data[5], data[6], data[7]);
+
+ /* If there is no valid PASID, it's likely a bug */
+ if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ return 0;
+
+ /* Interrupt types we care about: various signals and faults.
+ * They will be forwarded to a work queue (see below).
+ */
+ return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
+ source_id == SOC15_INTSRC_SDMA_TRAP ||
+ source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
+ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2 ||
+ KFD_IRQ_IS_FENCE(client_id, source_id);
+}
+
+static void event_interrupt_wq_v10(struct kfd_node *dev,
+ const uint32_t *ih_ring_entry)
+{
+ uint16_t source_id, client_id, pasid, vmid;
+ uint32_t context_id0, context_id1;
+ uint32_t encoding, sq_intr_err_type;
+
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+ context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
+
+ if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
+ client_id == SOC15_IH_CLIENTID_SE0SH ||
+ client_id == SOC15_IH_CLIENTID_SE1SH ||
+ client_id == SOC15_IH_CLIENTID_SE2SH ||
+ client_id == SOC15_IH_CLIENTID_SE3SH) {
+ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
+ kfd_signal_event_interrupt(pasid, context_id0, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
+ encoding = REG_GET_FIELD(context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+ pr_debug(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+ pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
+ if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
+ if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_DEBUG_TRAP_CODE(context_id0),
+ NULL, 0))
+ return;
+ }
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
+ ERR_TYPE);
+ pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
+ sq_intr_err_type);
+ if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
+ sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
+ event_interrupt_poison_consumption(dev, pasid, source_id);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
+ NULL,
+ 0);
+ }
+ } else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
+ client_id == SOC15_IH_CLIENTID_SDMA1 ||
+ client_id == SOC15_IH_CLIENTID_SDMA2 ||
+ client_id == SOC15_IH_CLIENTID_SDMA3 ||
+ (client_id == SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid &&
+ KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) ||
+ client_id == SOC15_IH_CLIENTID_SDMA4 ||
+ client_id == SOC15_IH_CLIENTID_SDMA5 ||
+ client_id == SOC15_IH_CLIENTID_SDMA6 ||
+ client_id == SOC15_IH_CLIENTID_SDMA7) {
+ if (source_id == SOC15_INTSRC_SDMA_TRAP) {
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
+ } else if (source_id == SOC15_INTSRC_SDMA_ECC) {
+ event_interrupt_poison_consumption(dev, pasid, source_id);
+ return;
+ }
+ } else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
+ client_id == SOC15_IH_CLIENTID_UTCL2) {
+ struct kfd_vm_fault_info info = {0};
+ uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ struct kfd_hsa_memory_exception_data exception_data;
+
+ if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
+ amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ event_interrupt_poison_consumption(dev, pasid, client_id);
+ return;
+ }
+
+ info.vmid = vmid;
+ info.mc_id = client_id;
+ info.page_addr = ih_ring_entry[4] |
+ (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+ info.prot_valid = ring_id & 0x08;
+ info.prot_read = ring_id & 0x10;
+ info.prot_write = ring_id & 0x20;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.gpu_id = dev->id;
+ exception_data.va = (info.page_addr) << PAGE_SHIFT;
+ exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
+ exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
+ exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
+ exception_data.failure.imprecise = 0;
+
+ kfd_set_dbg_ev_from_interrupt(dev,
+ pasid,
+ -1,
+ KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
+ &exception_data,
+ sizeof(exception_data));
+ } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
+ kfd_process_close_interrupt_drain(pasid);
+ }
+}
+
+const struct kfd_event_interrupt_class event_interrupt_class_v10 = {
+ .interrupt_isr = event_interrupt_isr_v10,
+ .interrupt_wq = event_interrupt_wq_v10,
+};
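
The header comment in the new file above describes how the 44-bit SQ packet is split across context_id1/context_id0. A minimal decode sketch using the masks defined in that file, showing how the wave/PRIV case extracts the doorbell id and trap code for the debugger (illustrative only, not part of the patch):

static void example_decode_gfx10_sq_intr(uint32_t ctxid0, uint32_t ctxid1)
{
	uint32_t encoding = (ctxid1 & SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK)
			    >> SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT;
	bool in_trap = ctxid0 & KFD_CONTEXT_ID0_PRIV_MASK;

	if (encoding == SQ_INTERRUPT_WORD_ENCODING_INST && in_trap) {
		/* Debugger notification: queue id and trap code ride in ctxid0 */
		uint32_t doorbell = KFD_DEBUG_DOORBELL_ID(ctxid0);
		uint32_t trap_code = KFD_DEBUG_TRAP_CODE(ctxid0);

		pr_debug("debug trap: doorbell 0x%x, trap code 0x%x\n",
			 doorbell, trap_code);
	}
}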
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index 0d53f6067422..f933bd231fb9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -26,6 +26,7 @@
#include "kfd_device_queue_manager.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "kfd_smi_events.h"
+#include "kfd_debug.h"
/*
* GFX11 SQ Interrupts
@@ -187,7 +188,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
}
-static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev,
+static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
uint16_t pasid, uint16_t source_id)
{
int ret = -EINVAL;
@@ -225,7 +226,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev,
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
}
-static bool event_interrupt_isr_v11(struct kfd_dev *dev,
+static bool event_interrupt_isr_v11(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
@@ -238,7 +239,7 @@ static bool event_interrupt_isr_v11(struct kfd_dev *dev,
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- if (/*!KFD_IRQ_IS_FENCE(client_id, source_id) &&*/
+ if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
(vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd))
return false;
@@ -267,19 +268,19 @@ static bool event_interrupt_isr_v11(struct kfd_dev *dev,
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
source_id == SOC21_INTSRC_SDMA_TRAP ||
- /* KFD_IRQ_IS_FENCE(client_id, source_id) || */
+ KFD_IRQ_IS_FENCE(client_id, source_id) ||
(((client_id == SOC21_IH_CLIENTID_VMC) ||
((client_id == SOC21_IH_CLIENTID_GFX) &&
(source_id == UTCL2_1_0__SRCID__FAULT))) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
-static void event_interrupt_wq_v11(struct kfd_dev *dev,
+static void event_interrupt_wq_v11(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, ring_id, pasid, vmid;
uint32_t context_id0, context_id1;
- uint8_t sq_int_enc, sq_int_errtype, sq_int_priv;
+ uint8_t sq_int_enc, sq_int_priv, sq_int_errtype;
struct kfd_vm_fault_info info = {0};
struct kfd_hsa_memory_exception_data exception_data;
@@ -312,9 +313,9 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
exception_data.failure.imprecise = 0;
- /*kfd_set_dbg_ev_from_interrupt(dev, pasid, -1,
+ kfd_set_dbg_ev_from_interrupt(dev, pasid, -1,
KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
- &exception_data, sizeof(exception_data));*/
+ &exception_data, sizeof(exception_data));
kfd_smi_event_update_vmfault(dev, pasid);
/* GRBM, SDMA, SE, PMM */
@@ -324,11 +325,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
/* CP */
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
- /*else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)),
- NULL, 0);*/
+ NULL, 0);
/* SDMA */
else if (source_id == SOC21_INTSRC_SDMA_TRAP)
@@ -350,11 +351,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
NULL, 0)))
- return;*/
+ return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
@@ -373,8 +374,8 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
}
- /*} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
- kfd_process_close_interrupt_drain(pasid);*/
+ } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
+ kfd_process_close_interrupt_drain(pasid);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 0b75a37b689b..f0731a6a5306 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,10 +23,40 @@
#include "kfd_priv.h"
#include "kfd_events.h"
+#include "kfd_debug.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
+/*
+ * GFX9 SQ Interrupts
+ *
+ * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
+ * packet to the Interrupt Handler:
+ * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
+ * Wave - Generated by S_SENDMSG through a shader program
+ * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
+ *
+ * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
+ * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
+ *
+ * - context_id0[27:26]
+ * Encoding type (0 = Auto, 1 = Wave, 2 = Error)
+ *
+ * - context_id0[13]
+ * PRIV bit indicates that Wave S_SEND or error occurred within trap
+ *
+ * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]}
+ * 24-bit data with the following layout per encoding type:
+ * Auto - only context_id0[8:0] is used, which reports various interrupts
+ * generated by SQG. The rest is 0.
+ * Wave - user data sent from m0 via S_SENDMSG
+ * Error - Error type (context_id1[7:4]), Error Details (rest of bits)
+ *
+ * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave
+ * S_SENDMSG and Errors. These are 0 for Auto.
+ */
+
enum SQ_INTERRUPT_WORD_ENCODING {
SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
SQ_INTERRUPT_WORD_ENCODING_INST,
@@ -84,13 +114,33 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000
+/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */
#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))
#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
-static void event_interrupt_poison_consumption_v9(struct kfd_dev *dev,
+/*
+ * The debugger will send user data(m0) with PRIV=1 to indicate it requires
+ * notification from the KFD with the following queue id (DOORBELL_ID) and
+ * trap code (TRAP_CODE).
+ */
+#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff
+#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10
+#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00
+#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \
+ KFD_INT_DATA_DEBUG_DOORBELL_MASK)
+#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \
+ KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \
+ >> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT)
+#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
+#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
+#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
+ KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
+ >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
+
+static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
int old_poison, ret = -EINVAL;
@@ -160,7 +210,7 @@ static bool context_id_expected(struct kfd_dev *dev)
}
}
-static bool event_interrupt_isr_v9(struct kfd_dev *dev,
+static bool event_interrupt_isr_v9(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
@@ -168,14 +218,16 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
+ source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
+ client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- if (vmid < dev->vm_info.first_vmid_kfd ||
- vmid > dev->vm_info.last_vmid_kfd)
+ if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
+ (vmid < dev->vm_info.first_vmid_kfd ||
+ vmid > dev->vm_info.last_vmid_kfd))
return false;
- source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
- client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle clients we care about */
@@ -194,7 +246,8 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
client_id != SOC15_IH_CLIENTID_SE0SH &&
client_id != SOC15_IH_CLIENTID_SE1SH &&
client_id != SOC15_IH_CLIENTID_SE2SH &&
- client_id != SOC15_IH_CLIENTID_SE3SH)
+ client_id != SOC15_IH_CLIENTID_SE3SH &&
+ !KFD_IRQ_IS_FENCE(client_id, source_id))
return false;
/* This is a known issue for gfx9. Under non HWS, pasid is not set
@@ -206,7 +259,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
*patched_flag = true;
memcpy(patched_ihre, ih_ring_entry,
- dev->device_info.ih_ring_entry_size);
+ dev->kfd->device_info.ih_ring_entry_size);
pasid = dev->dqm->vmid_pasid[vmid];
@@ -235,7 +288,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
uint32_t context_id =
SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
- if (context_id == 0 && context_id_expected(dev))
+ if (context_id == 0 && context_id_expected(dev->kfd))
return false;
}
@@ -247,13 +300,14 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
source_id == SOC15_INTSRC_SDMA_ECC ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ KFD_IRQ_IS_FENCE(client_id, source_id) ||
((client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
-static void event_interrupt_wq_v9(struct kfd_dev *dev,
+static void event_interrupt_wq_v9(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
@@ -302,6 +356,13 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
sq_int_data);
+ if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
+ if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(sq_int_data),
+ KFD_DEBUG_TRAP_CODE(sq_int_data),
+ NULL, 0))
+ return;
+ }
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
@@ -324,8 +385,12 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
- } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
- kfd_signal_hw_exception_event(pasid);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
+ kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ KFD_DEBUG_DOORBELL_ID(context_id0),
+ KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
+ NULL, 0);
+ }
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
client_id == SOC15_IH_CLIENTID_SDMA2 ||
@@ -345,6 +410,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ struct kfd_hsa_memory_exception_data exception_data;
if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
@@ -360,13 +426,56 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
info.prot_read = ring_id & 0x10;
info.prot_write = ring_id & 0x20;
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.gpu_id = dev->id;
+ exception_data.va = (info.page_addr) << PAGE_SHIFT;
+ exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
+ exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
+ exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
+ exception_data.failure.imprecise = 0;
+
+ kfd_set_dbg_ev_from_interrupt(dev,
+ pasid,
+ -1,
+ KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
+ &exception_data,
+ sizeof(exception_data));
kfd_smi_event_update_vmfault(dev, pasid);
- kfd_dqm_evict_pasid(dev->dqm, pasid);
- kfd_signal_vm_fault_event(dev, pasid, &info);
+ } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
+ kfd_process_close_interrupt_drain(pasid);
}
}
+static bool event_interrupt_isr_v9_4_3(struct kfd_node *node,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
+{
+ uint16_t node_id, vmid;
+
+ /*
+ * For GFX 9.4.3, process the interrupt if:
+ * - NodeID field in IH entry matches the corresponding bit
+ * set in interrupt_bitmap Bits 0-15.
+ * OR
+ * - If partition mode is CPX and interrupt came from
+ * Node_id 0,4,8,12, then check if the Bit (16 + client id)
+ * is set in interrupt bitmap Bits 16-31.
+ */
+ node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
+ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
+ if (kfd_irq_is_from_node(node, node_id, vmid))
+ return event_interrupt_isr_v9(node, ih_ring_entry,
+ patched_ihre, patched_flag);
+ return false;
+}
+
const struct kfd_event_interrupt_class event_interrupt_class_v9 = {
.interrupt_isr = event_interrupt_isr_v9,
.interrupt_wq = event_interrupt_wq_v9,
};
+
+const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3 = {
+ .interrupt_isr = event_interrupt_isr_v9_4_3,
+ .interrupt_wq = event_interrupt_wq_v9,
+};
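The debug-trap handling added above is driven purely by mask/shift decoding of the 32-bit context/data words carried in each IH ring entry. The standalone sketch below is not part of the patch: the CP_BAD_OP error-code layout (mask 0x3fffc00, shift 10) is taken from the macros above, while treating the low 10 bits as the doorbell id is an assumption made only for illustration.

/* Illustration of the KFD_DEBUG_* style mask/shift decoding.
 * Ordinary userspace C; field layouts as noted in the lead-in
 * (the doorbell-id mask is assumed).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_DOORBELL_ID_MASK	0x3ff		/* assumed low 10 bits */
#define DEMO_BAD_OP_ECODE_MASK	0x3fffc00	/* from the patch      */
#define DEMO_BAD_OP_ECODE_SHIFT	10

static uint32_t demo_doorbell_id(uint32_t ctxid0)
{
	return ctxid0 & DEMO_DOORBELL_ID_MASK;
}

static uint32_t demo_bad_op_ecode(uint32_t ctxid0)
{
	return (ctxid0 & DEMO_BAD_OP_ECODE_MASK) >> DEMO_BAD_OP_ECODE_SHIFT;
}

int main(void)
{
	uint32_t ctxid0 = (3u << DEMO_BAD_OP_ECODE_SHIFT) | 0x12;

	printf("doorbell %u, ecode %u\n",
	       (unsigned)demo_doorbell_id(ctxid0),
	       (unsigned)demo_bad_op_ecode(ctxid0));
	return 0;
}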
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 34772fe74296..dd3c43c1ad70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -50,29 +50,29 @@
static void interrupt_wq(struct work_struct *);
-int kfd_interrupt_init(struct kfd_dev *kfd)
+int kfd_interrupt_init(struct kfd_node *node)
{
int r;
- r = kfifo_alloc(&kfd->ih_fifo,
- KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size,
+ r = kfifo_alloc(&node->ih_fifo,
+ KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,
GFP_KERNEL);
if (r) {
- dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n");
+ dev_err(node->adev->dev, "Failed to allocate IH fifo\n");
return r;
}
- kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
- if (unlikely(!kfd->ih_wq)) {
- kfifo_free(&kfd->ih_fifo);
- dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n");
+ node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!node->ih_wq)) {
+ kfifo_free(&node->ih_fifo);
+ dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
return -ENOMEM;
}
- spin_lock_init(&kfd->interrupt_lock);
+ spin_lock_init(&node->interrupt_lock);
- INIT_WORK(&kfd->interrupt_work, interrupt_wq);
+ INIT_WORK(&node->interrupt_work, interrupt_wq);
- kfd->interrupts_active = true;
+ node->interrupts_active = true;
/*
* After this function returns, the interrupt will be enabled. This
@@ -84,7 +84,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
return 0;
}
-void kfd_interrupt_exit(struct kfd_dev *kfd)
+void kfd_interrupt_exit(struct kfd_node *node)
{
/*
* Stop the interrupt handler from writing to the ring and scheduling
@@ -93,31 +93,31 @@ void kfd_interrupt_exit(struct kfd_dev *kfd)
*/
unsigned long flags;
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
- kfd->interrupts_active = false;
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
+ spin_lock_irqsave(&node->interrupt_lock, flags);
+ node->interrupts_active = false;
+ spin_unlock_irqrestore(&node->interrupt_lock, flags);
/*
* flush_work ensures that there are no outstanding
* work-queue items that will access interrupt_ring. New work items
* can't be created because we stopped interrupt handling above.
*/
- flush_workqueue(kfd->ih_wq);
+ flush_workqueue(node->ih_wq);
- kfifo_free(&kfd->ih_fifo);
+ kfifo_free(&node->ih_fifo);
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
+bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
int count;
- count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info.ih_ring_entry_size);
- if (count != kfd->device_info.ih_ring_entry_size) {
- dev_dbg_ratelimited(kfd->adev->dev,
+ count = kfifo_in(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
+ if (count != node->kfd->device_info.ih_ring_entry_size) {
+ dev_dbg_ratelimited(node->adev->dev,
"Interrupt ring overflow, dropping interrupt %d\n",
count);
return false;
@@ -129,32 +129,32 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
+static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
{
int count;
- count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
- kfd->device_info.ih_ring_entry_size);
+ count = kfifo_out(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
- WARN_ON(count && count != kfd->device_info.ih_ring_entry_size);
+ WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
- return count == kfd->device_info.ih_ring_entry_size;
+ return count == node->kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
{
- struct kfd_dev *dev = container_of(work, struct kfd_dev,
+ struct kfd_node *dev = container_of(work, struct kfd_node,
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
unsigned long start_jiffies = jiffies;
- if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
+ if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(dev->adev->dev, "Ring entry too small\n");
return;
}
while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
- dev->device_info.event_interrupt_class->interrupt_wq(dev,
+ dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
@@ -166,14 +166,14 @@ static void interrupt_wq(struct work_struct *work)
}
}
-bool interrupt_is_wanted(struct kfd_dev *dev,
+bool interrupt_is_wanted(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
- wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev,
+ wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev,
ih_ring_entry, patched_ihre, flag);
return wanted != 0;
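kfd_interrupt.c keeps the existing top-half/bottom-half split, only retargeted at struct kfd_node: enqueue_ih_ring_entry() copies each fixed-size IH entry into the per-node kfifo from interrupt context, and interrupt_wq() drains it from the high-priority workqueue, dropping entries when the ring is full. The userspace sketch below mimics that fixed-entry handoff; the entry size, ring depth and names are illustrative, not taken from the driver.

/* Simplified single-producer/single-consumer handoff in the style of
 * enqueue_ih_ring_entry()/dequeue_ih_ring_entry(): fixed-size entries,
 * overflow means the entry is dropped.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE	32	/* bytes per IH entry (example value) */
#define NUM_ENTRIES	4	/* tiny ring so the demo overflows    */

static uint8_t ring[NUM_ENTRIES * ENTRY_SIZE];
static size_t head, tail;	/* byte counters, single reader/writer */

static bool enqueue(const void *entry)
{
	if (head - tail >= sizeof(ring))	/* full: drop, like the driver */
		return false;
	memcpy(&ring[head % sizeof(ring)], entry, ENTRY_SIZE);
	head += ENTRY_SIZE;
	return true;
}

static bool dequeue(void *entry)
{
	if (head == tail)
		return false;
	memcpy(entry, &ring[tail % sizeof(ring)], ENTRY_SIZE);
	tail += ENTRY_SIZE;
	return true;
}

int main(void)
{
	uint8_t in[ENTRY_SIZE] = { 0 }, out[ENTRY_SIZE];
	int dropped = 0;

	for (int i = 0; i < 6; i++) {	/* six interrupts, four slots */
		in[0] = (uint8_t)i;
		if (!enqueue(in))
			dropped++;
	}
	while (dequeue(out))
		printf("processed entry %u\n", (unsigned)out[0]);
	printf("dropped %d\n", dropped);
	return 0;
}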
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
deleted file mode 100644
index ec1bf611624e..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ /dev/null
@@ -1,349 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/*
- * Copyright 2018-2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/kconfig.h>
-
-#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
-
-#include <linux/printk.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/amd-iommu.h>
-#include "kfd_priv.h"
-#include "kfd_topology.h"
-#include "kfd_iommu.h"
-
-static const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
- AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
- AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
-
-/** kfd_iommu_check_device - Check whether IOMMU is available for device
- */
-int kfd_iommu_check_device(struct kfd_dev *kfd)
-{
- struct amd_iommu_device_info iommu_info;
- int err;
-
- if (!kfd->use_iommu_v2)
- return -ENODEV;
-
- iommu_info.flags = 0;
- err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info);
- if (err)
- return err;
-
- if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags)
- return -ENODEV;
-
- return 0;
-}
-
-/** kfd_iommu_device_init - Initialize IOMMU for device
- */
-int kfd_iommu_device_init(struct kfd_dev *kfd)
-{
- struct amd_iommu_device_info iommu_info;
- unsigned int pasid_limit;
- int err;
-
- if (!kfd->use_iommu_v2)
- return 0;
-
- iommu_info.flags = 0;
- err = amd_iommu_device_info(kfd->adev->pdev, &iommu_info);
- if (err < 0) {
- dev_err(kfd_device,
- "error getting iommu info. is the iommu enabled?\n");
- return -ENODEV;
- }
-
- if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
- dev_err(kfd_device,
- "error required iommu flags ats %i, pri %i, pasid %i\n",
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
- != 0);
- return -ENODEV;
- }
-
- pasid_limit = min_t(unsigned int,
- (unsigned int)(1 << kfd->device_info.max_pasid_bits),
- iommu_info.max_pasids);
-
- if (!kfd_set_pasid_limit(pasid_limit)) {
- dev_err(kfd_device, "error setting pasid limit\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/** kfd_iommu_bind_process_to_device - Have the IOMMU bind a process
- *
- * Binds the given process to the given device using its PASID. This
- * enables IOMMUv2 address translation for the process on the device.
- *
- * This function assumes that the process mutex is held.
- */
-int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
-{
- struct kfd_dev *dev = pdd->dev;
- struct kfd_process *p = pdd->process;
- int err;
-
- if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND)
- return 0;
-
- if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
- pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
- return -EINVAL;
- }
-
- err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread);
- if (!err)
- pdd->bound = PDD_BOUND;
-
- return err;
-}
-
-/** kfd_iommu_unbind_process - Unbind process from all devices
- *
- * This removes all IOMMU device bindings of the process. To be used
- * before process termination.
- */
-void kfd_iommu_unbind_process(struct kfd_process *p)
-{
- int i;
-
- for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i]->bound == PDD_BOUND)
- amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev,
- p->pasid);
-}
-
-/* Callback for process shutdown invoked by the IOMMU driver */
-static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
-{
- struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
- struct kfd_process *p;
- struct kfd_process_device *pdd;
-
- if (!dev)
- return;
-
- /*
- * Look for the process that matches the pasid. If there is no such
- * process, we either released it in amdkfd's own notifier, or there
- * is a bug. Unfortunately, there is no way to tell...
- */
- p = kfd_lookup_process_by_pasid(pasid);
- if (!p)
- return;
-
- pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
-
- mutex_lock(&p->mutex);
-
- pdd = kfd_get_process_device_data(dev, p);
- if (pdd)
- /* For GPU relying on IOMMU, we need to dequeue here
- * when PASID is still bound.
- */
- kfd_process_dequeue_from_device(pdd);
-
- mutex_unlock(&p->mutex);
-
- kfd_unref_process(p);
-}
-
-/* This function called by IOMMU driver on PPR failure */
-static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid,
- unsigned long address, u16 flags)
-{
- struct kfd_dev *dev;
-
- dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn),
- pasid,
- address,
- flags);
-
- dev = kfd_device_by_pci_dev(pdev);
- if (!WARN_ON(!dev))
- kfd_signal_iommu_event(dev, pasid, address,
- flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
-
- return AMD_IOMMU_INV_PRI_RSP_INVALID;
-}
-
-/*
- * Bind processes do the device that have been temporarily unbound
- * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
- */
-static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
-{
- struct kfd_process_device *pdd;
- struct kfd_process *p;
- unsigned int temp;
- int err = 0;
-
- int idx = srcu_read_lock(&kfd_processes_srcu);
-
- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- mutex_lock(&p->mutex);
- pdd = kfd_get_process_device_data(kfd, p);
-
- if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) {
- mutex_unlock(&p->mutex);
- continue;
- }
-
- err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid,
- p->lead_thread);
- if (err < 0) {
- pr_err("Unexpected pasid 0x%x binding failure\n",
- p->pasid);
- mutex_unlock(&p->mutex);
- break;
- }
-
- pdd->bound = PDD_BOUND;
- mutex_unlock(&p->mutex);
- }
-
- srcu_read_unlock(&kfd_processes_srcu, idx);
-
- return err;
-}
-
-/*
- * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
- * processes will be restored to PDD_BOUND state in
- * kfd_bind_processes_to_device.
- */
-static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
-{
- struct kfd_process_device *pdd;
- struct kfd_process *p;
- unsigned int temp;
-
- int idx = srcu_read_lock(&kfd_processes_srcu);
-
- hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- mutex_lock(&p->mutex);
- pdd = kfd_get_process_device_data(kfd, p);
-
- if (WARN_ON(!pdd)) {
- mutex_unlock(&p->mutex);
- continue;
- }
-
- if (pdd->bound == PDD_BOUND)
- pdd->bound = PDD_BOUND_SUSPENDED;
- mutex_unlock(&p->mutex);
- }
-
- srcu_read_unlock(&kfd_processes_srcu, idx);
-}
-
-/** kfd_iommu_suspend - Prepare IOMMU for suspend
- *
- * This unbinds processes from the device and disables the IOMMU for
- * the device.
- */
-void kfd_iommu_suspend(struct kfd_dev *kfd)
-{
- if (!kfd->use_iommu_v2)
- return;
-
- kfd_unbind_processes_from_device(kfd);
-
- amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
- amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
- amd_iommu_free_device(kfd->adev->pdev);
-}
-
-/** kfd_iommu_resume - Restore IOMMU after resume
- *
- * This reinitializes the IOMMU for the device and re-binds previously
- * suspended processes to the device.
- */
-int kfd_iommu_resume(struct kfd_dev *kfd)
-{
- unsigned int pasid_limit;
- int err;
-
- if (!kfd->use_iommu_v2)
- return 0;
-
- pasid_limit = kfd_get_pasid_limit();
-
- err = amd_iommu_init_device(kfd->adev->pdev, pasid_limit);
- if (err)
- return -ENXIO;
-
- amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev,
- iommu_pasid_shutdown_callback);
- amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev,
- iommu_invalid_ppr_cb);
-
- err = kfd_bind_processes_to_device(kfd);
- if (err) {
- amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
- amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
- amd_iommu_free_device(kfd->adev->pdev);
- return err;
- }
-
- return 0;
-}
-
-/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
- */
-int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
-{
- struct kfd_perf_properties *props;
-
- if (!(kdev->node_props.capability & HSA_CAP_ATS_PRESENT))
- return 0;
-
- if (!amd_iommu_pc_supported())
- return 0;
-
- props = kfd_alloc_struct(props);
- if (!props)
- return -ENOMEM;
- strcpy(props->block_name, "iommu");
- props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
- amd_iommu_pc_get_max_counters(0); /* assume one iommu */
- list_add_tail(&props->list, &kdev->perf_props);
-
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
deleted file mode 100644
index 8cf0fcbe87c2..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/*
- * Copyright 2018-2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __KFD_IOMMU_H__
-#define __KFD_IOMMU_H__
-
-#include <linux/kconfig.h>
-
-#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
-
-#define KFD_SUPPORT_IOMMU_V2
-
-int kfd_iommu_check_device(struct kfd_dev *kfd);
-int kfd_iommu_device_init(struct kfd_dev *kfd);
-
-int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd);
-void kfd_iommu_unbind_process(struct kfd_process *p);
-
-void kfd_iommu_suspend(struct kfd_dev *kfd);
-int kfd_iommu_resume(struct kfd_dev *kfd);
-
-int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev);
-
-#else
-
-static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
-{
- return -ENODEV;
-}
-static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
-{
-#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
- WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
-#endif
- return 0;
-}
-
-static inline int kfd_iommu_bind_process_to_device(
- struct kfd_process_device *pdd)
-{
- return 0;
-}
-static inline void kfd_iommu_unbind_process(struct kfd_process *p)
-{
- /* empty */
-}
-
-static inline void kfd_iommu_suspend(struct kfd_dev *kfd)
-{
- /* empty */
-}
-static inline int kfd_iommu_resume(struct kfd_dev *kfd)
-{
- return 0;
-}
-
-static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
-{
- return 0;
-}
-
-#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
-
-#endif /* __KFD_IOMMU_H__ */
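The deleted header shows the usual config-gated stub pattern this driver used for IOMMUv2: real prototypes when IS_REACHABLE(CONFIG_AMD_IOMMU_V2), static inline no-ops otherwise, so callers compile unchanged either way. A minimal single-file sketch of the same pattern follows; CONFIG_DEMO_FEATURE is a made-up symbol used only for this demo.

/* Config-gated stub pattern: build with -DCONFIG_DEMO_FEATURE for the
 * real path, without it for the silent no-op fallback.
 */
#include <stdio.h>

#ifdef CONFIG_DEMO_FEATURE

int demo_feature_init(void)
{
	printf("feature enabled\n");	/* real implementation */
	return 0;
}

#else

static inline int demo_feature_init(void)
{
	return 0;	/* stub succeeds so callers need no #ifdefs */
}

#endif

int main(void)
{
	return demo_feature_init();
}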
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index bcf7bc3302c9..1bea629c49ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -38,7 +38,7 @@
/* Initialize a kernel queue, including allocations of GART memory
* needed for the queue.
*/
-static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
struct queue_properties prop;
@@ -75,7 +75,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
if (!kq->mqd_mgr)
return false;
- prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
+ prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);
if (!prop.doorbell_ptr) {
pr_err("Failed to initialize doorbell");
@@ -112,7 +112,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
- retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size,
+ retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size,
&kq->wptr_mem);
if (retval != 0)
@@ -189,7 +189,7 @@ err_rptr_allocate_vidmem:
err_eop_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
- kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
+ kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr);
err_get_kernel_doorbell:
return false;
@@ -220,7 +220,7 @@ static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
kfd_gtt_sa_free(kq->dev, kq->eop_mem);
kfd_gtt_sa_free(kq->dev, kq->pq);
- kfd_release_kernel_doorbell(kq->dev,
+ kfd_release_kernel_doorbell(kq->dev->kfd,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
}
@@ -298,7 +298,7 @@ void kq_submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
- if (kq->dev->device_info.doorbell_size == 8) {
+ if (kq->dev->kfd->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
kq->pending_wptr64);
@@ -311,7 +311,7 @@ void kq_submit_packet(struct kernel_queue *kq)
void kq_rollback_packet(struct kernel_queue *kq)
{
- if (kq->dev->device_info.doorbell_size == 8) {
+ if (kq->dev->kfd->device_info.doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
kq->pending_wptr = *kq->wptr_kernel %
(kq->queue->properties.queue_size / 4);
@@ -320,7 +320,7 @@ void kq_rollback_packet(struct kernel_queue *kq)
}
}
-struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type)
{
struct kernel_queue *kq;
@@ -345,7 +345,7 @@ void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
}
/* FIXME: Can this test be removed? */
-static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
+static __attribute__((unused)) void test_kq(struct kfd_node *dev)
{
struct kernel_queue *kq;
uint32_t *buffer, i;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 383202fd1ea2..9a6244430845 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -53,7 +53,7 @@ void kq_rollback_packet(struct kernel_queue *kq);
struct kernel_queue {
/* data */
- struct kfd_dev *dev;
+ struct kfd_node *dev;
struct mqd_manager *mqd_mgr;
struct queue *queue;
uint64_t pending_wptr64;
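kq_submit_packet() and kq_rollback_packet() above pick a 64-bit or 32-bit doorbell write based on device_info.doorbell_size, and the 32-bit write pointer wraps modulo the queue size in dwords. The short sketch below replays that wrap-around; the queue and packet sizes are invented.

/* Wrap-around of a 32-bit kernel-queue write pointer: the pointer
 * counts dwords, so the ring slot is wptr64 % (queue_size / 4).
 * All sizes are example values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t queue_size = 4096;		/* ring size in bytes      */
	uint32_t wptr_limit = queue_size / 4;	/* ring size in dwords     */
	uint64_t wptr64 = 0;			/* monotonically increases */

	for (int i = 0; i < 5; i++) {
		wptr64 += 300;			/* pretend packet lengths  */
		printf("wptr64 %llu -> slot %llu\n",
		       (unsigned long long)wptr64,
		       (unsigned long long)(wptr64 % wptr_limit));
	}
	return 0;
}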
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 54933903bcb8..7d82c7da223a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -64,7 +64,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = npages * 8;
- r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
@@ -206,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
- return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
+ return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}
static void
@@ -236,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
unsigned long addr;
addr = page_to_pfn(page) << PAGE_SHIFT;
- return (addr - adev->kfd.dev->pgmap.range.start);
+ return (addr - adev->kfd.pgmap.range.start);
}
static struct page *
@@ -287,11 +287,12 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
}
static int
-svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
- uint64_t npages = migrate->npages;
+ uint64_t npages = migrate->cpages;
+ struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
dma_addr_t *src;
@@ -321,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
if (r) {
- dev_err(adev->dev, "%s: fail %d dma_map_page\n",
+ dev_err(dev, "%s: fail %d dma_map_page\n",
__func__, r);
goto out_free_vram_pages;
}
@@ -390,12 +391,13 @@ out_free_vram_pages:
}
static long
-svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
+ struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
@@ -421,9 +423,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.dst = migrate.src + npages;
scratch = (dma_addr_t *)(migrate.dst + npages);
- kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_start(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, adev->kfd.dev->id, prange->prefetch_loc,
+ 0, node->id, prange->prefetch_loc,
prange->preferred_loc, trigger);
r = migrate_vma_setup(&migrate);
@@ -445,7 +447,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
+ r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@@ -454,18 +456,17 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, adev->kfd.dev->id, trigger);
+ 0, node->id, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
- svm_range_free_dma_mappings(prange);
out_free:
kvfree(buf);
out:
if (!r && cpages) {
- pdd = svm_range_get_pdd_by_adev(prange, adev);
+ pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
@@ -492,8 +493,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- struct amdgpu_device *adev;
uint64_t ttm_res_offset;
+ struct kfd_node *node;
unsigned long cpages = 0;
long r = 0;
@@ -503,9 +504,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
return 0;
}
- adev = svm_range_get_adev_by_id(prange, best_loc);
- if (!adev) {
- pr_debug("failed to get device by id 0x%x\n", best_loc);
+ node = svm_range_get_node_by_id(prange, best_loc);
+ if (!node) {
+ pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
return -ENODEV;
}
@@ -515,9 +516,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
- r = svm_range_vram_node_new(adev, prange, true);
+ r = svm_range_vram_node_new(node, prange, true);
if (r) {
- dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
+ dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
ttm_res_offset = prange->offset << PAGE_SHIFT;
@@ -530,7 +531,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
+ r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
@@ -541,10 +542,12 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
addr = next;
}
- if (cpages)
+ if (cpages) {
prange->actual_loc = best_loc;
- else
+ svm_range_free_dma_mappings(prange, true);
+ } else {
svm_range_vram_node_free(prange);
+ }
return r < 0 ? r : 0;
}
@@ -649,11 +652,13 @@ out_oom:
/**
* svm_migrate_vma_to_ram - migrate range inside one vma from device to system
*
- * @adev: amdgpu device to migrate from
* @prange: svm range structure
* @vma: vm_area_struct that range [start, end] belongs to
* @start: range start virtual address in pages
* @end: range end virtual address in pages
+ * @node: kfd node device to migrate from
+ * @trigger: reason of migration
+ * @fault_page: vmf->page when called from svm_migrate_to_ram(), the CPU page fault callback
*
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
@@ -663,7 +668,7 @@ out_oom:
* positive values - partial migration, number of pages not migrated
*/
static long
-svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
uint32_t trigger, struct page *fault_page)
{
@@ -671,6 +676,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
+ struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
@@ -699,9 +705,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
- kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_start(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- adev->kfd.dev->id, 0, prange->prefetch_loc,
+ node->id, 0, prange->prefetch_loc,
prange->preferred_loc, trigger);
r = migrate_vma_setup(&migrate);
@@ -735,9 +741,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- adev->kfd.dev->id, 0, trigger);
+ node->id, 0, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
@@ -745,7 +751,7 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
- pdd = svm_range_get_pdd_by_adev(prange, adev);
+ pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
}
@@ -757,6 +763,7 @@ out:
* @prange: range structure
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
+ * @fault_page: vmf->page when called from svm_migrate_to_ram(), the CPU page fault callback
*
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
@@ -766,7 +773,7 @@ out:
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
uint32_t trigger, struct page *fault_page)
{
- struct amdgpu_device *adev;
+ struct kfd_node *node;
struct vm_area_struct *vma;
unsigned long addr;
unsigned long start;
@@ -780,13 +787,11 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
return 0;
}
- adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
- if (!adev) {
- pr_debug("failed to get device by id 0x%x\n",
- prange->actual_loc);
+ node = svm_range_get_node_by_id(prange, prange->actual_loc);
+ if (!node) {
+ pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
-
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
prange->svms, prange, prange->start, prange->last,
prange->actual_loc);
@@ -805,7 +810,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+ r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
@@ -987,18 +992,21 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
-int svm_migrate_init(struct amdgpu_device *adev)
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
- struct kfd_dev *kfddev = adev->kfd.dev;
+ struct amdgpu_kfd_dev *kfddev = &adev->kfd;
struct dev_pagemap *pgmap;
struct resource *res = NULL;
unsigned long size;
void *r;
- /* Page migration works on Vega10 or newer */
- if (!KFD_IS_SOC15(kfddev))
+ /* Page migration works on gfx9 or newer */
+ if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
return -EINVAL;
+ if (adev->gmc.is_app_apu)
+ return 0;
+
pgmap = &kfddev->pgmap;
memset(pgmap, 0, sizeof(*pgmap));
@@ -1041,8 +1049,6 @@ int svm_migrate_init(struct amdgpu_device *adev)
amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));
- svm_range_set_max_pages(adev);
-
pr_info("HMM registered %ldMB device memory\n", size >> 20);
return 0;
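svm_migrate_addr_to_pfn() and svm_migrate_addr() now go through adev->kfd.pgmap instead of the removed adev->kfd.dev pointer, but the arithmetic is unchanged: a VRAM offset and a device-private PFN differ only by the start of the registered dev_pagemap range. The sketch below replays that arithmetic; the range start and offsets are invented values.

/* addr <-> pfn arithmetic behind svm_migrate_addr_to_pfn() and
 * svm_migrate_addr(); values below are invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1ULL << DEMO_PAGE_SHIFT)

int main(void)
{
	uint64_t range_start = 0x100000000000ULL;	/* assumed pgmap.range.start */
	uint64_t vram_offset = 42 * DEMO_PAGE_SIZE;	/* offset inside VRAM        */

	uint64_t pfn  = (vram_offset + range_start) >> DEMO_PAGE_SHIFT;
	uint64_t addr = pfn << DEMO_PAGE_SHIFT;
	uint64_t back = addr - range_start;		/* svm_migrate_addr() step   */

	printf("pfn 0x%llx, offset back 0x%llx\n",
	       (unsigned long long)pfn, (unsigned long long)back);
	return back == vram_offset ? 0 : 1;
}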
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index a5d7e6d22264..487f26368164 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -47,15 +47,6 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
-int svm_migrate_init(struct amdgpu_device *adev);
-
-#else
-
-static inline int svm_migrate_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
#endif /* KFD_MIGRATE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 623ccd227b7d..d01bb57733b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -46,9 +46,9 @@ int pipe_priority_map[] = {
KFD_PIPE_PRIORITY_CS_HIGH
};
-struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
+struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
{
- struct kfd_mem_obj *mqd_mem_obj = NULL;
+ struct kfd_mem_obj *mqd_mem_obj;
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
@@ -61,10 +61,10 @@ struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_propertie
return mqd_mem_obj;
}
-struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
+struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
struct queue_properties *q)
{
- struct kfd_mem_obj *mqd_mem_obj = NULL;
+ struct kfd_mem_obj *mqd_mem_obj;
uint64_t offset;
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
@@ -72,11 +72,12 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
return NULL;
offset = (q->sdma_engine_id *
- dev->device_info.num_sdma_queues_per_engine +
+ dev->kfd->device_info.num_sdma_queues_per_engine +
q->sdma_queue_id) *
dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
- offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+ offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
+ NUM_XCC(dev->xcc_mask);
mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
+ offset);
@@ -189,7 +190,7 @@ int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
- queue_id, p->doorbell_off);
+ queue_id, p->doorbell_off, 0);
}
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
@@ -197,7 +198,7 @@ int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
- pipe_id, queue_id);
+ pipe_id, queue_id, 0);
}
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
@@ -216,7 +217,7 @@ bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
- pipe_id, queue_id);
+ pipe_id, queue_id, 0);
}
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -246,3 +247,28 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
{
return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
+
+uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev)
+{
+ return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
+}
+
+void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj,
+ uint32_t virtual_xcc_id)
+{
+ uint64_t offset;
+
+ offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id;
+
+ mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ?
+ dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;
+ mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
+ mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
+ dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
+}
+
+uint64_t kfd_mqd_stride(struct mqd_manager *mm,
+ struct queue_properties *q)
+{
+ return mm->mqd_size;
+}
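kfd_get_hiq_xcc_mqd() carves the per-XCC HIQ MQD copies out of the single hiq_sdma_mqd allocation by multiplying a fixed stride by the virtual XCC id, and allocate_sdma_mqd() now places the SDMA MQDs after all NUM_XCC HIQ copies. The sketch below shows only that offset math; the stride and XCC count are invented example values.

/* Offset layout in the style of kfd_get_hiq_xcc_mqd() and
 * allocate_sdma_mqd(): per-XCC HIQ MQDs first, SDMA MQDs after them.
 * Stride and XCC count are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define HIQ_MQD_STRIDE	0x200U	/* assumed per-XCC HIQ MQD size */
#define NUM_XCC		4U	/* assumed partition width      */

static uint64_t hiq_mqd_offset(uint32_t virtual_xcc_id)
{
	return (uint64_t)HIQ_MQD_STRIDE * virtual_xcc_id;
}

static uint64_t sdma_mqd_base(void)
{
	return (uint64_t)HIQ_MQD_STRIDE * NUM_XCC;
}

int main(void)
{
	for (uint32_t xcc = 0; xcc < NUM_XCC; xcc++)
		printf("xcc %u: HIQ MQD at offset 0x%llx\n", (unsigned)xcc,
		       (unsigned long long)hiq_mqd_offset(xcc));
	printf("SDMA MQDs start at 0x%llx\n",
	       (unsigned long long)sdma_mqd_base());
	return 0;
}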
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 57f900ccaa10..23158db7da03 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -68,7 +68,7 @@
*/
extern int pipe_priority_map[];
struct mqd_manager {
- struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd,
+ struct kfd_mem_obj* (*allocate_mqd)(struct kfd_node *kfd,
struct queue_properties *q);
void (*init_mqd)(struct mqd_manager *mm, void **mqd,
@@ -97,6 +97,7 @@ struct mqd_manager {
uint32_t queue_id);
int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
@@ -119,16 +120,18 @@ struct mqd_manager {
int (*debugfs_show_mqd)(struct seq_file *m, void *data);
#endif
uint32_t (*read_doorbell_id)(void *mqd);
+ uint64_t (*mqd_stride)(struct mqd_manager *mm,
+ struct queue_properties *p);
struct mutex mqd_mutex;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
uint32_t mqd_size;
};
-struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev,
+struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev,
struct queue_properties *q);
-struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev,
+struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
struct queue_properties *q);
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj);
@@ -164,4 +167,10 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
+void kfd_get_hiq_xcc_mqd(struct kfd_node *dev,
+ struct kfd_mem_obj *mqd_mem_obj, uint32_t virtual_xcc_id);
+
+uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev);
+uint64_t kfd_mqd_stride(struct mqd_manager *mm,
+ struct queue_properties *q);
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4889865c725c..ee1d32d957f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct cik_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -74,7 +73,7 @@ static void set_priority(struct cik_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -167,7 +166,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, wptr_mask, mms);
+ wptr_shift, wptr_mask, mms, 0);
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -207,13 +206,6 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q,
- struct mqd_update_info *minfo)
-{
- __update_mqd(mm, mqd, q, minfo, 1);
-}
-
static uint32_t read_doorbell_id(void *mqd)
{
struct cik_mqd *m = (struct cik_mqd *)mqd;
@@ -221,9 +213,9 @@ static uint32_t read_doorbell_id(void *mqd)
return m->queue_doorbell_id0;
}
-static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q,
- struct mqd_update_info *minfo)
+static void update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
__update_mqd(mm, mqd, q, minfo, 0);
}
@@ -388,9 +380,8 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
-
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -428,6 +419,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct cik_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -442,6 +434,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct cik_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -457,6 +450,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
@@ -468,16 +462,3 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
return mqd;
}
-
-struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
-{
- struct mqd_manager *mqd;
-
- mqd = mqd_manager_init_cik(type, dev);
- if (!mqd)
- return NULL;
- if (type == KFD_MQD_TYPE_CP)
- mqd->update_mqd = update_mqd_hawaii;
- return mqd;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index d3e2b6a599a4..83699392c808 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct v10_compute_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -74,7 +73,7 @@ static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -117,12 +116,17 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+ /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
+ * DISPATCH_PTR. This is required for the kfd debugger
+ */
+ m->cp_hqd_hq_scheduler0 = 1 << 14;
+
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
}
- if (mm->dev->cwsr_enabled) {
+ if (mm->dev->kfd->cwsr_enabled) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -151,7 +155,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, 0, mms);
+ wptr_shift, 0, mms, 0);
return r;
}
@@ -210,7 +214,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_doorbell_control |=
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
- if (mm->dev->cwsr_enabled)
+ if (mm->dev->kfd->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
@@ -227,11 +231,13 @@ static uint32_t read_doorbell_id(void *mqd)
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v10_compute_mqd *m;
+ struct kfd_context_save_area_header header;
m = get_mqd(mqd);
@@ -250,6 +256,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
* accessible to user mode
*/
+ header.wave_state.control_stack_size = *ctl_stack_used_size;
+ header.wave_state.wave_state_size = *save_area_used_size;
+
+ header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+ header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+ if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
+ return -EFAULT;
+
return 0;
}
@@ -303,6 +318,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
+static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ int err;
+ struct v10_compute_mqd *m;
+ u32 doorbell_off;
+
+ m = get_mqd(mqd);
+
+ doorbell_off = m->cp_hqd_pq_doorbell_control >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+
+ err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
+ if (err)
+ pr_debug("Destroy HIQ MQD failed: %d\n", err);
+
+ return err;
+}
+
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -405,7 +440,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -432,6 +467,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->get_wave_state = get_wave_state;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -444,9 +480,10 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -478,6 +515,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct v10_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 5aa75f72caa1..2319467d2d95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -46,15 +46,33 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
{
struct v11_compute_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
+ bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE |
+ UPDATE_FLAG_DBG_WA_DISABLE));
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr))
return;
+ m = get_mqd(mqd);
+
+ if (has_wa_flag) {
+ uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
+ 0xffff : 0xffffffff;
+
+ m->compute_static_thread_mgmt_se0 = wa_mask;
+ m->compute_static_thread_mgmt_se1 = wa_mask;
+ m->compute_static_thread_mgmt_se2 = wa_mask;
+ m->compute_static_thread_mgmt_se3 = wa_mask;
+ m->compute_static_thread_mgmt_se4 = wa_mask;
+ m->compute_static_thread_mgmt_se5 = wa_mask;
+ m->compute_static_thread_mgmt_se6 = wa_mask;
+ m->compute_static_thread_mgmt_se7 = wa_mask;
+
+ return;
+ }
+
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
- m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
@@ -81,7 +99,7 @@ static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -91,12 +109,12 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* MES write to areas beyond MQD size. So allocate
* 1 PAGE_SIZE memory for MQD is MES is enabled.
*/
- if (kfd->shared_resources.enable_mes)
+ if (node->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_compute_mqd);
- if (kfd_gtt_sa_allocate(kfd, size, &mqd_mem_obj))
+ if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
return NULL;
return mqd_mem_obj;
@@ -109,11 +127,12 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
uint64_t addr;
struct v11_compute_mqd *m;
int size;
+ uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
- if (mm->dev->shared_resources.enable_mes)
+ if (mm->dev->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_compute_mqd);
@@ -122,14 +141,15 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
+
+ m->compute_static_thread_mgmt_se0 = wa_mask;
+ m->compute_static_thread_mgmt_se1 = wa_mask;
+ m->compute_static_thread_mgmt_se2 = wa_mask;
+ m->compute_static_thread_mgmt_se3 = wa_mask;
+ m->compute_static_thread_mgmt_se4 = wa_mask;
+ m->compute_static_thread_mgmt_se5 = wa_mask;
+ m->compute_static_thread_mgmt_se6 = wa_mask;
+ m->compute_static_thread_mgmt_se7 = wa_mask;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -143,6 +163,11 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+ /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
+ * DISPATCH_PTR. This is required for the kfd debugger
+ */
+ m->cp_hqd_hq_status0 = 1 << 14;
+
/*
* GFX11 RS64 CPFW version >= 509 supports PCIe atomics support
* acknowledgment.
@@ -155,7 +180,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
}
- if (mm->dev->cwsr_enabled) {
+ if (mm->dev->kfd->cwsr_enabled) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -184,7 +209,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, 0, mms);
+ wptr_shift, 0, mms, 0);
return r;
}
@@ -243,7 +268,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_doorbell_control |=
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
- if (mm->dev->cwsr_enabled)
+ if (mm->dev->kfd->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
@@ -260,12 +285,13 @@ static uint32_t read_doorbell_id(void *mqd)
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v11_compute_mqd *m;
- /*struct mqd_user_context_save_area_header header;*/
+ struct kfd_context_save_area_header header;
m = get_mqd(mqd);
@@ -283,16 +309,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
* it's part of the context save area that is already
* accessible to user mode
*/
-/*
- header.control_stack_size = *ctl_stack_used_size;
- header.wave_state_size = *save_area_used_size;
+ header.wave_state.control_stack_size = *ctl_stack_used_size;
+ header.wave_state.wave_state_size = *save_area_used_size;
- header.wave_state_offset = m->cp_hqd_wg_state_offset;
- header.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+ header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+ header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
- if (copy_to_user(ctl_stack, &header, sizeof(header)))
+ if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
return -EFAULT;
-*/
+
return 0;
}
@@ -310,6 +335,26 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
+static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ int err;
+ struct v11_compute_mqd *m;
+ u32 doorbell_off;
+
+ m = get_mqd(mqd);
+
+ doorbell_off = m->cp_hqd_pq_doorbell_control >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+
+ err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
+ if (err)
+ pr_debug("Destroy HIQ MQD failed: %d\n", err);
+
+ return err;
+}
+
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -319,7 +364,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
- if (mm->dev->shared_resources.enable_mes)
+ if (mm->dev->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_sdma_mqd);
@@ -387,7 +432,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -424,7 +469,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
@@ -463,7 +508,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
* To allocate SDMA MQDs by generic functions
* when MES is enabled.
*/
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
mqd->allocate_mqd = allocate_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index fdbfd725841f..e23d32f35607 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -32,6 +32,22 @@
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
+#include "kfd_device_queue_manager.h"
+
+static void update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
+ struct mqd_update_info *minfo);
+
+static uint64_t mqd_stride_v9(struct mqd_manager *mm,
+ struct queue_properties *q)
+{
+ if (mm->dev->kfd->cwsr_enabled &&
+ q->type == KFD_QUEUE_TYPE_COMPUTE)
+ return ALIGN(q->ctl_stack_size, PAGE_SIZE) +
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE);
+
+ return mm->mqd_size;
+}
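With CWSR enabled, the per-queue stride computed above is the page-aligned control stack plus the page-aligned MQD. A standalone sketch of that arithmetic, using made-up sizes:

/* Illustrative only, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE_EX	4096UL
#define ALIGN_EX(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long ctl_stack_size = 3072;	/* hypothetical ctl_stack_size */
	unsigned long mqd_size = 512;		/* stand-in for sizeof(struct v9_mqd) */
	unsigned long stride = ALIGN_EX(ctl_stack_size, PAGE_SIZE_EX) +
			       ALIGN_EX(mqd_size, PAGE_SIZE_EX);

	printf("per-XCC MQD stride = %lu bytes\n", stride);	/* 8192 */
	return 0;
}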
static inline struct v9_mqd *get_mqd(void *mqd)
{
@@ -49,8 +65,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -83,7 +98,7 @@ static void set_priority(struct v9_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
struct queue_properties *q)
{
int retval;
@@ -105,28 +120,30 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
* amdgpu memory functions to do so.
*/
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
+ if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
+ retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev,
+ (ALIGN(q->ctl_stack_size, PAGE_SIZE) +
+ ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) *
+ NUM_XCC(node->xcc_mask),
&(mqd_mem_obj->gtt_mem),
&(mqd_mem_obj->gpu_addr),
(void *)&(mqd_mem_obj->cpu_ptr), true);
+
+ if (retval) {
+ kfree(mqd_mem_obj);
+ return NULL;
+ }
} else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
+ retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
&mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
- return NULL;
+ if (retval)
+ return NULL;
}
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -135,7 +152,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
{
uint64_t addr;
struct v9_mqd *m;
- struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev;
m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
@@ -165,31 +181,21 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
- if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ /* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
+ * DISPATCH_PTR. This is required for the kfd debugger.
+ */
+ m->cp_hqd_hq_status0 = 1 << 14;
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
- /* On GC 9.4.3, DW 41 is re-purposed as
- * compute_tg_chunk_size.
- * TODO: review this setting when active CUs in the
- * partition play a role
- */
- m->compute_static_thread_mgmt_se6 = 1;
- }
- } else {
- /* PM4 queue */
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
- m->compute_static_thread_mgmt_se6 = 0;
- /* TODO: program pm4_target_xcc */
- }
- }
if (q->tba_addr) {
m->compute_pgm_rsrc2 |=
(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -205,7 +211,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q, NULL);
+ update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -217,14 +223,13 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, 0, mms);
+ wptr_shift, 0, mms, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev;
struct v9_mqd *m;
m = get_mqd(mqd);
@@ -257,9 +262,14 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
* Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
* more than (EOP entry count - 1) so a queue size of 0x800 dwords
* is safe, giving a maximum field value of 0xA.
+ *
+ * Also, do the calculation only if EOP is used (size > 0); otherwise
+ * the order_base_2 calculation provides an incorrect result.
+ *
*/
- m->cp_hqd_eop_control = min(0xA,
- order_base_2(q->eop_ring_buffer_size / 4) - 1);
+ m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
+ min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;
+
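A standalone worked example of the EOP_CONTROL value computed above; order_base_2 is re-implemented here as a stand-in and the buffer size is hypothetical:

/* Illustrative only, not kernel code. */
#include <stdio.h>

#define MIN_EX(a, b)	((a) < (b) ? (a) : (b))

static unsigned int order_base_2_ex(unsigned long n)	/* stand-in helper */
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long eop_ring_buffer_size = 0x2000;	/* 8 KiB, hypothetical */
	unsigned int field = eop_ring_buffer_size ?
		MIN_EX(0xA, order_base_2_ex(eop_ring_buffer_size / 4) - 1) : 0;

	printf("cp_hqd_eop_control = 0x%x\n", field);	/* 0xA */
	return 0;
}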
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
@@ -270,17 +280,14 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
- m->cp_hqd_pq_control |=
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
- if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
- m->cp_hqd_pq_control |=
- CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
@@ -298,11 +305,13 @@ static uint32_t read_doorbell_id(void *mqd)
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v9_mqd *m;
+ struct kfd_context_save_area_header header;
/* Control stack is located one page after MQD. */
void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
@@ -314,7 +323,18 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
- if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
+ header.wave_state.control_stack_size = *ctl_stack_used_size;
+ header.wave_state.wave_state_size = *save_area_used_size;
+
+ header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+ header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+ if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
+ return -EFAULT;
+
+ if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
+ mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
+ *ctl_stack_used_size))
return -EFAULT;
return 0;
@@ -385,6 +405,25 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
+static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ int err;
+ struct v9_mqd *m;
+ u32 doorbell_off;
+
+ m = get_mqd(mqd);
+
+ doorbell_off = m->cp_hqd_pq_doorbell_control >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
+ if (err)
+ pr_debug("Destroy HIQ MQD failed: %d\n", err);
+
+ return err;
+}
+
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -467,6 +506,291 @@ static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
qp->is_active = 0;
}
+static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ int xcc = 0;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ uint64_t xcc_gart_addr = 0;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);
+
+ init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
+
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+ 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+ 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+ m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
+ if (xcc == 0) {
+ /* Set no_update_rptr = 0 in Master XCC */
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = m;
+ *gart_addr = xcc_gart_addr;
+ }
+ }
+}
+
+static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + hiq_mqd_size * inst;
+ err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
+ pipe_id, queue_id,
+ p->doorbell_off, xcc_id);
+ if (err) {
+ pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
+ struct v9_mqd *m;
+ u32 doorbell_off;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ m = get_mqd(mqd + hiq_mqd_size * inst);
+
+ doorbell_off = m->cp_hqd_pq_doorbell_control >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+
+ err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
+ if (err) {
+ pr_debug("Destroy HIQ MQD failed for xcc: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
+ struct kfd_mem_obj *xcc_mqd_mem_obj,
+ uint64_t offset)
+{
+ xcc_mqd_mem_obj->gtt_mem = (offset == 0) ?
+ mqd_mem_obj->gtt_mem : NULL;
+ xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
+ xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
+ + offset);
+}
+
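A layout sketch of how get_xcc_mqd() above carves one shared allocation into per-XCC slices at multiples of the MQD stride; the base address, stride, and XCC count are assumptions:

/* Illustrative only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_addr = 0x100000;	/* assumed base of the shared MQD buffer */
	uint64_t stride = 0x2000;	/* assumed per-XCC (ctl stack + MQD) stride */
	int xcc;

	for (xcc = 0; xcc < 4; xcc++)	/* assume a 4-XCC partition */
		printf("XCC %d MQD at 0x%llx\n", xcc,
		       (unsigned long long)(gpu_addr + stride * xcc));
	return 0;
}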
+static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ int xcc = 0;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ uint64_t xcc_gart_addr = 0;
+ uint64_t xcc_ctx_save_restore_area_address;
+ uint64_t offset = mm->mqd_stride(mm, q);
+ uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);
+
+ init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
+
+ m->cp_mqd_stride_size = offset;
+
+ /*
+ * Update the CWSR address for each XCC if CWSR is enabled
+ * and the CWSR area is allocated in the Thunk
+ */
+ if (mm->dev->kfd->cwsr_enabled &&
+ q->ctx_save_restore_area_address) {
+ xcc_ctx_save_restore_area_address =
+ q->ctx_save_restore_area_address +
+ (xcc * q->ctx_save_restore_area_size);
+
+ m->cp_hqd_ctx_save_base_addr_lo =
+ lower_32_bits(xcc_ctx_save_restore_area_address);
+ m->cp_hqd_ctx_save_base_addr_hi =
+ upper_32_bits(xcc_ctx_save_restore_area_address);
+ }
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ m->compute_tg_chunk_size = 1;
+ m->compute_current_logic_xcc_id =
+ (local_xcc_start + xcc) %
+ NUM_XCC(mm->dev->xcc_mask);
+
+ switch (xcc) {
+ case 0:
+ /* Master XCC */
+ m->cp_hqd_pq_control &=
+ ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* PM4 Queue */
+ m->compute_current_logic_xcc_id = 0;
+ m->compute_tg_chunk_size = 0;
+ m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
+ }
+
+ if (xcc == 0) {
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = m;
+ *gart_addr = xcc_gart_addr;
+ }
+ }
+}
+
+static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q, struct mqd_update_info *minfo)
+{
+ struct v9_mqd *m;
+ int xcc = 0;
+ uint64_t size = mm->mqd_stride(mm, q);
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+ update_mqd(mm, m, q, minfo);
+
+ if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ switch (xcc) {
+ case 0:
+ /* Master XCC */
+ m->cp_hqd_pq_control &=
+ ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
+ break;
+ default:
+ break;
+ }
+ m->compute_tg_chunk_size = 1;
+ } else {
+ /* PM4 Queue */
+ m->compute_current_logic_xcc_id = 0;
+ m->compute_tg_chunk_size = 0;
+ m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
+ }
+ }
+}
+
+static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type, unsigned int timeout,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ struct v9_mqd *m;
+ uint64_t mqd_offset;
+
+ m = get_mqd(mqd);
+ mqd_offset = m->cp_mqd_stride_size;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + mqd_offset * inst;
+ err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
+ type, timeout, pipe_id,
+ queue_id, xcc_id);
+ if (err) {
+ pr_debug("Destroy MQD failed for xcc: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
+static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t xcc_mask = mm->dev->xcc_mask;
+ int xcc_id, err, inst = 0;
+ void *xcc_mqd;
+ uint64_t mqd_stride_size = mm->mqd_stride(mm, p);
+
+ for_each_inst(xcc_id, xcc_mask) {
+ xcc_mqd = mqd + mqd_stride_size * inst;
+ err = mm->dev->kfd2kgd->hqd_load(
+ mm->dev->adev, xcc_mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
+ xcc_id);
+ if (err) {
+ pr_debug("Load MQD failed for xcc: %d\n", inst);
+ break;
+ }
+ ++inst;
+ }
+
+ return err;
+}
+
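A worked example of the wptr_shift noted in the comment above: an AQL write pointer counts 64-byte packets while the CP consumes a dword index, so shifting left by 4 multiplies by 16 dwords. User-space arithmetic only; the packet count is made up:

/* Illustrative only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t aql_wptr = 3;		/* hypothetical: 3 AQL packets written */
	uint32_t wptr_shift = 4;	/* as used for KFD_QUEUE_FORMAT_AQL */
	uint64_t dword_wptr = aql_wptr << wptr_shift;

	printf("CP dword write index = %llu\n",
	       (unsigned long long)dword_wptr);	/* 48 dwords == 3 * 64 bytes */
	return 0;
}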
+static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
+ void __user *ctl_stack,
+ u32 *ctl_stack_used_size,
+ u32 *save_area_used_size)
+{
+ int xcc, err = 0;
+ void *xcc_mqd;
+ void __user *xcc_ctl_stack;
+ uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
+ u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ xcc_mqd = mqd + mqd_stride_size * xcc;
+ xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
+ q->ctx_save_restore_area_size * xcc);
+
+ err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
+ &tmp_ctl_stack_used_size,
+ &tmp_save_area_used_size);
+ if (err)
+ break;
+
+ /*
+ * Set the ctl_stack_used_size and save_area_used_size to
+ * ctl_stack_used_size and save_area_used_size of XCC 0 when
+ * passing the info to user-space.
+ * For multi XCC, user-space would have to look at the header
+ * info of each control stack area to determine the control
+ * stack size and save area used.
+ */
+ if (xcc == 0) {
+ *ctl_stack_used_size = tmp_ctl_stack_used_size;
+ *save_area_used_size = tmp_save_area_used_size;
+ }
+ }
+
+ return err;
+}
+
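A hedged user-space sketch of how a tool might walk the per-XCC headers written by get_wave_state_v9_4_3() above; the header layout, area size, and XCC count below are stand-ins, not the real UAPI definitions:

/* Illustrative only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct wave_state_hdr {			/* stand-in for the wave_state header */
	uint32_t control_stack_offset;
	uint32_t control_stack_size;
	uint32_t wave_state_offset;
	uint32_t wave_state_size;
};

static void walk_headers(const uint8_t *ctl_stack, uint32_t area_size, int num_xcc)
{
	int xcc;

	for (xcc = 0; xcc < num_xcc; xcc++) {
		const struct wave_state_hdr *h =
			(const void *)(ctl_stack + (uint64_t)area_size * xcc);

		printf("XCC %d: ctl stack %u bytes, wave state %u bytes\n", xcc,
		       (unsigned int)h->control_stack_size,
		       (unsigned int)h->wave_state_size);
	}
}

int main(void)
{
	static uint32_t buf[32];	/* two zeroed 64-byte per-XCC areas */

	walk_headers((const uint8_t *)buf, 64, 2);
	return 0;
}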
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
@@ -486,7 +810,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -502,34 +826,50 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
mqd->allocate_mqd = allocate_mqd;
- mqd->init_mqd = init_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
- mqd->get_wave_state = get_wave_state;
mqd->get_checkpoint_info = get_checkpoint_info;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
+ mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
+ mqd->init_mqd = init_mqd_v9_4_3;
+ mqd->load_mqd = load_mqd_v9_4_3;
+ mqd->update_mqd = update_mqd_v9_4_3;
+ mqd->destroy_mqd = destroy_mqd_v9_4_3;
+ mqd->get_wave_state = get_wave_state_v9_4_3;
+ } else {
+ mqd->init_mqd = init_mqd;
+ mqd->load_mqd = load_mqd;
+ mqd->update_mqd = update_mqd;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->get_wave_state = get_wave_state;
+ }
break;
case KFD_MQD_TYPE_HIQ:
mqd->allocate_mqd = allocate_hiq_mqd;
- mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v9_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
+ mqd->init_mqd = init_mqd_hiq_v9_4_3;
+ mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
+ mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
+ } else {
+ mqd->init_mqd = init_mqd_hiq;
+ mqd->load_mqd = kfd_hiq_load_mqd_kiq;
+ mqd->destroy_mqd = destroy_hiq_mqd;
+ }
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
@@ -555,6 +895,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct v9_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 530ba6f5b57e..657c37822980 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -51,8 +51,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct vi_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
- !minfo->cu_mask.ptr)
+ if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
@@ -77,7 +76,7 @@ static void set_priority(struct vi_mqd *m, struct queue_properties *q)
m->cp_hqd_queue_priority = q->priority;
}
-static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
@@ -136,7 +135,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
@@ -165,7 +164,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
- wptr_shift, wptr_mask, mms);
+ wptr_shift, wptr_mask, mms, 0);
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -227,7 +226,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control =
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
@@ -238,14 +237,6 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-
-static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q,
- struct mqd_update_info *minfo)
-{
- __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1);
-}
-
static uint32_t read_doorbell_id(void *mqd)
{
struct vi_mqd *m = (struct vi_mqd *)mqd;
@@ -253,14 +244,15 @@ static uint32_t read_doorbell_id(void *mqd)
return m->queue_doorbell_id0;
}
-static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q,
- struct mqd_update_info *minfo)
+static void update_mqd(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
@@ -446,7 +438,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
#endif
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
+ struct kfd_node *dev)
{
struct mqd_manager *mqd;
@@ -486,6 +478,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct vi_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -500,6 +493,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct vi_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -515,6 +509,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct vi_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
@@ -526,16 +521,3 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
return mqd;
}
-
-struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev)
-{
- struct mqd_manager *mqd;
-
- mqd = mqd_manager_init_vi(type, dev);
- if (!mqd)
- return NULL;
- if (type == KFD_MQD_TYPE_CP)
- mqd->update_mqd = update_mqd_tonga;
- return mqd;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index f612325241aa..401096c103b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -45,7 +45,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
unsigned int max_proc_per_quantum = 1;
- struct kfd_dev *dev = pm->dqm->dev;
+ struct kfd_node *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->active_queue_count;
@@ -370,6 +370,38 @@ out:
return retval;
}
+int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
+{
+ int retval = 0;
+ uint32_t *buffer, size;
+
+ size = pm->pmf->set_grace_period_size;
+
+ mutex_lock(&pm->lock);
+
+ if (size) {
+ kq_acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t),
+ (unsigned int **)&buffer);
+
+ if (!buffer) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
+ if (!retval)
+ kq_submit_packet(pm->priv_queue);
+ else
+ kq_rollback_packet(pm->priv_queue);
+ }
+
+out:
+ mutex_unlock(&pm->lock);
+ return retval;
+}
+
int pm_send_unmap_queue(struct packet_manager *pm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 18250845a989..8ce6f5200905 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -34,6 +34,9 @@ static int pm_map_process_v9(struct packet_manager *pm,
{
struct pm4_mes_map_process *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
+ struct kfd_node *kfd = pm->dqm->dev;
+ struct kfd_process_device *pdd =
+ container_of(qpd, struct kfd_process_device, qpd);
packet = (struct pm4_mes_map_process *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
@@ -49,6 +52,12 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+ if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&
+ pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
+ packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
+ packet->bitfields2.new_debug = 1;
+ }
+
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
if (qpd->tba_addr) {
@@ -79,6 +88,10 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
{
struct pm4_mes_map_process_aldebaran *packet;
uint64_t vm_page_table_base_addr = qpd->page_table_base;
+ struct kfd_dev *kfd = pm->dqm->dev->kfd;
+ struct kfd_process_device *pdd =
+ container_of(qpd, struct kfd_process_device, qpd);
+ int i;
packet = (struct pm4_mes_map_process_aldebaran *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
@@ -93,11 +106,22 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+ packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
+ pdd->spi_dbg_launch_mode;
+
+ if (pdd->process->debug_trap_enabled) {
+ for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
+ packet->tcp_watch_cntl[i] = pdd->watch_points[i];
+
+ packet->bitfields2.single_memops =
+ !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
+ }
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
if (qpd->tba_addr) {
packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+ packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
}
@@ -119,7 +143,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
struct pm4_mes_runlist *packet;
int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
+ struct kfd_node *kfd = pm->dqm->dev;
/* Determine the number of processes to map together to HW:
* it can not exceed the number of VMIDs available to the
@@ -220,13 +244,24 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
use_static = false; /* no static queues under SDMA */
- if (q->properties.sdma_engine_id < 2 && !pm_use_ext_eng(q->device))
+ if (q->properties.sdma_engine_id < 2 &&
+ !pm_use_ext_eng(q->device->kfd))
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
else {
- packet->bitfields2.extended_engine_sel =
- extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
+ /*
+ * For GFX9.4.3, the SDMA engine id can be 8 or greater.
+ * For such cases, set extended_engine_sel to 2 and
+ * ensure engine_sel lies between 0-7.
+ */
+ if (q->properties.sdma_engine_id >= 8)
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__sdma8_to_15_sel;
+ else
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
+
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8;
}
break;
default:
@@ -251,6 +286,42 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
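A standalone illustration of the SDMA engine selection described in the comment above for GFX9.4.3: engines 0-7 use extended_engine_sel 1, engines 8-15 use extended_engine_sel 2, and engine_sel is the id modulo 8.

/* Illustrative only, not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned int id;

	for (id = 0; id < 16; id++)
		printf("sdma_engine_id %2u -> extended_engine_sel %u, engine_sel %u\n",
		       id, id >= 8 ? 2u : 1u, id % 8);
	return 0;
}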
+static int pm_set_grace_period_v9(struct packet_manager *pm,
+ uint32_t *buffer,
+ uint32_t grace_period)
+{
+ struct pm4_mec_write_data_mmio *packet;
+ uint32_t reg_offset = 0;
+ uint32_t reg_data = 0;
+
+ pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
+ pm->dqm->dev->adev,
+ pm->dqm->wait_times,
+ grace_period,
+ &reg_offset,
+ &reg_data,
+ 0);
+
+ if (grace_period == USE_DEFAULT_GRACE_PERIOD)
+ reg_data = pm->dqm->wait_times;
+
+ packet = (struct pm4_mec_write_data_mmio *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));
+
+ packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA,
+ sizeof(struct pm4_mec_write_data_mmio));
+
+ packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register;
+ packet->bitfields2.addr_incr =
+ addr_incr___write_data__do_not_increment_address;
+
+ packet->bitfields3.dst_mmreg_addr = reg_offset;
+
+ packet->data = reg_data;
+
+ return 0;
+}
+
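An illustrative packing of the WRITE_DATA control dword that pm_set_grace_period_v9() above builds. The bitfield layout is assumed to follow the usual little-endian GCC rules the kernel header relies on; the printed value is specific to that layout.

/* Illustrative only, not kernel code. */
#include <stdio.h>

union ordinal2_ex {	/* trimmed copy of pm4_mec_write_data_mmio bitfields2 */
	struct {
		unsigned int reserved1:8;
		unsigned int dst_sel:4;
		unsigned int reserved2:4;
		unsigned int addr_incr:1;
		unsigned int rest:15;
	} bits;
	unsigned int u32;
};

int main(void)
{
	union ordinal2_ex o2 = { .u32 = 0 };

	o2.bits.dst_sel = 0;	/* dst_sel___write_data__mem_mapped_register */
	o2.bits.addr_incr = 1;	/* addr_incr___write_data__do_not_increment_address */

	printf("ordinal2 = 0x%08x\n", o2.u32);	/* 0x00010000 on this layout */
	return 0;
}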
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
@@ -263,7 +334,8 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues));
- packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ?
+ packet->bitfields2.extended_engine_sel =
+ pm_use_ext_eng(pm->dqm->dev->kfd) ?
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel :
extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
@@ -333,6 +405,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
+ .set_grace_period = pm_set_grace_period_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -340,6 +413,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+ .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
@@ -350,6 +424,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
+ .set_grace_period = pm_set_grace_period_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
@@ -357,6 +432,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+ .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 4f951eaa6ee8..c1199d06d131 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -77,7 +77,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
{
struct pm4_mes_runlist *packet;
int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
+ struct kfd_node *kfd = pm->dqm->dev;
if (WARN_ON(!ib))
return -EFAULT;
@@ -303,6 +303,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources = pm_set_resources_vi,
.map_queues = pm_map_queues_vi,
.unmap_queues = pm_unmap_queues_vi,
+ .set_grace_period = NULL,
.query_status = pm_query_status_vi,
.release_mem = pm_release_mem_vi,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -310,6 +311,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
+ .set_grace_period_size = 0,
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index a666710ed403..8b6b2bd5c148 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -146,7 +146,10 @@ struct pm4_mes_map_process {
union {
struct {
uint32_t pasid:16;
- uint32_t reserved1:8;
+ uint32_t reserved1:2;
+ uint32_t debug_vmid:4;
+ uint32_t new_debug:1;
+ uint32_t reserved2:1;
uint32_t diq_enable:1;
uint32_t process_quantum:7;
} bitfields2;
@@ -263,7 +266,8 @@ enum mes_map_queues_engine_sel_enum {
enum mes_map_queues_extended_engine_sel_enum {
extended_engine_sel__mes_map_queues__legacy_engine_sel = 0,
- extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1,
+ extended_engine_sel__mes_map_queues__sdma8_to_15_sel = 2
};
struct pm4_mes_map_queues {
@@ -583,6 +587,71 @@ struct pm4_mec_release_mem {
#endif
+#ifndef PM4_MEC_WRITE_DATA_DEFINED
+#define PM4_MEC_WRITE_DATA_DEFINED
+
+enum WRITE_DATA_dst_sel_enum {
+ dst_sel___write_data__mem_mapped_register = 0,
+ dst_sel___write_data__tc_l2 = 2,
+ dst_sel___write_data__gds = 3,
+ dst_sel___write_data__memory = 5,
+ dst_sel___write_data__memory_mapped_adc_persistent_state = 6,
+};
+
+enum WRITE_DATA_addr_incr_enum {
+ addr_incr___write_data__increment_address = 0,
+ addr_incr___write_data__do_not_increment_address = 1
+};
+
+enum WRITE_DATA_wr_confirm_enum {
+ wr_confirm___write_data__do_not_wait_for_write_confirmation = 0,
+ wr_confirm___write_data__wait_for_write_confirmation = 1
+};
+
+enum WRITE_DATA_cache_policy_enum {
+ cache_policy___write_data__lru = 0,
+ cache_policy___write_data__stream = 1
+};
+
+
+struct pm4_mec_write_data_mmio {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int reserved1:8;
+ unsigned int dst_sel:4;
+ unsigned int reserved2:4;
+ unsigned int addr_incr:1;
+ unsigned int reserved3:2;
+ unsigned int resume_vf:1;
+ unsigned int wr_confirm:1;
+ unsigned int reserved4:4;
+ unsigned int cache_policy:2;
+ unsigned int reserved5:5;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int dst_mmreg_addr:18;
+ unsigned int reserved6:14;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ uint32_t reserved7;
+
+ uint32_t data;
+
+};
+
+#endif
+
enum {
CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 94a438956868..3d9ce44d88da 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -113,6 +113,8 @@
#define KFD_UNMAP_LATENCY_MS (4000)
+#define KFD_MAX_SDMA_QUEUES 128
+
/*
* 512 = 0x200
* The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
@@ -173,12 +175,6 @@ extern int send_sigterm;
*/
extern int debug_largebar;
-/*
- * Ignore CRAT table during KFD initialization, can be used to work around
- * broken CRAT tables on some AMD systems
- */
-extern int ignore_crat;
-
/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;
@@ -199,6 +195,8 @@ extern int amdgpu_no_queue_eviction_on_vm_fault;
/* Enable eviction debug messages */
extern bool debug_evictions;
+extern struct mutex kfd_processes_mutex;
+
enum cache_policy {
cache_policy_coherent,
cache_policy_noncoherent
@@ -210,11 +208,13 @@ enum cache_policy {
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))
+struct kfd_node;
+
struct kfd_event_interrupt_class {
- bool (*interrupt_isr)(struct kfd_dev *dev,
+ bool (*interrupt_isr)(struct kfd_node *dev,
const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
bool *patched_flag);
- void (*interrupt_wq)(struct kfd_dev *dev,
+ void (*interrupt_wq)(struct kfd_node *dev,
const uint32_t *ih_ring_entry);
};
@@ -228,16 +228,15 @@ struct kfd_device_info {
uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
bool supports_cwsr;
- bool needs_iommu_device;
bool needs_pci_atomics;
uint32_t no_atomic_fw_version;
unsigned int num_sdma_queues_per_engine;
unsigned int num_reserved_sdma_queues_per_engine;
- uint64_t reserved_sdma_queues_bitmap;
+ DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
};
-unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
-unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);
+unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
+unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);
struct kfd_mem_obj {
uint32_t range_start;
@@ -253,34 +252,78 @@ struct kfd_vmid_info {
uint32_t vmid_num_kfd;
};
+#define MAX_KFD_NODES 8
+
+struct kfd_dev;
+
+struct kfd_node {
+ unsigned int node_id;
+ struct amdgpu_device *adev; /* Duplicated here along with keeping
+ * a copy in kfd_dev to save a hop
+ */
+ const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
+ * keeping a copy in kfd_dev to
+ * save a hop
+ */
+ struct kfd_vmid_info vm_info;
+ unsigned int id; /* topology stub index */
+ uint32_t xcc_mask; /* Instance mask of XCCs present */
+ struct amdgpu_xcp *xcp;
+
+ /* Interrupts */
+ struct kfifo ih_fifo;
+ struct workqueue_struct *ih_wq;
+ struct work_struct interrupt_work;
+ spinlock_t interrupt_lock;
+
+ /*
+ * Interrupts of interest to KFD are copied
+ * from the HW ring into a SW ring.
+ */
+ bool interrupts_active;
+ uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */
+
+ /* QCM Device instance */
+ struct device_queue_manager *dqm;
+
+ /* Global GWS resource shared between processes */
+ void *gws;
+ bool gws_debug_workaround;
+
+ /* Clients watching SMI events */
+ struct list_head smi_clients;
+ spinlock_t smi_lock;
+ uint32_t reset_seq_num;
+
+ /* SRAM ECC flag */
+ atomic_t sram_ecc_flag;
+
+ /* SPM process id */
+ unsigned int spm_pasid;
+
+ /* Maximum process number mapped to HW scheduler */
+ unsigned int max_proc_per_quantum;
+
+ unsigned int compute_vmid_bitmap;
+
+ struct kfd_local_mem_info local_mem_info;
+
+ struct kfd_dev *kfd;
+};
+
struct kfd_dev {
struct amdgpu_device *adev;
struct kfd_device_info device_info;
- unsigned int id; /* topology stub index */
-
- phys_addr_t doorbell_base; /* Start of actual doorbells used by
- * KFD. It is aligned for mapping
- * into user mode
- */
- size_t doorbell_base_dw_offset; /* Offset from the start of the PCI
- * doorbell BAR to the first KFD
- * doorbell in dwords. GFX reserves
- * the segment before this offset.
- */
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
*/
struct kgd2kfd_shared_resources shared_resources;
- struct kfd_vmid_info vm_info;
- struct kfd_local_mem_info local_mem_info;
const struct kfd2kgd_calls *kfd2kgd;
struct mutex doorbell_mutex;
- DECLARE_BITMAP(doorbell_available_index,
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
void *gtt_mem;
uint64_t gtt_start_gpu_addr;
@@ -290,30 +333,13 @@ struct kfd_dev {
unsigned int gtt_sa_chunk_size;
unsigned int gtt_sa_num_of_chunks;
- /* Interrupts */
- struct kfifo ih_fifo;
- struct workqueue_struct *ih_wq;
- struct work_struct interrupt_work;
- spinlock_t interrupt_lock;
-
- /* QCM Device instance */
- struct device_queue_manager *dqm;
-
bool init_complete;
- /*
- * Interrupts of interest to KFD are copied
- * from the HW ring into a SW ring.
- */
- bool interrupts_active;
/* Firmware versions */
uint16_t mec_fw_version;
uint16_t mec2_fw_version;
uint16_t sdma_fw_version;
- /* Maximum process number mapped to HW scheduler */
- unsigned int max_proc_per_quantum;
-
/* CWSR */
bool cwsr_enabled;
const void *cwsr_isa;
@@ -324,31 +350,26 @@ struct kfd_dev {
bool pci_atomic_requested;
- /* Use IOMMU v2 flag */
- bool use_iommu_v2;
-
- /* SRAM ECC flag */
- atomic_t sram_ecc_flag;
-
/* Compute Profile ref. count */
atomic_t compute_profile;
- /* Global GWS resource shared between processes */
- void *gws;
-
- /* Clients watching SMI events */
- struct list_head smi_clients;
- spinlock_t smi_lock;
-
- uint32_t reset_seq_num;
-
struct ida doorbell_ida;
unsigned int max_doorbell_slices;
int noretry;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
+ struct kfd_node *nodes[MAX_KFD_NODES];
+ unsigned int num_nodes;
+
+ /* Track per device allocated watch points */
+ uint32_t alloc_watch_ids;
+ spinlock_t watch_points_lock;
+
+ /* Kernel doorbells for KFD device */
+ struct amdgpu_bo *doorbells;
+
+ /* bitmap for dynamic doorbell allocation from doorbell object */
+ unsigned long *doorbell_bitmap;
};
enum kfd_mempool {
@@ -478,8 +499,13 @@ struct queue_properties {
uint32_t doorbell_off;
bool is_interop;
bool is_evicted;
+ bool is_suspended;
+ bool is_being_destroyed;
bool is_active;
bool is_gws;
+ uint32_t pm4_target_xcc;
+ bool is_dbg_wa;
+ bool is_user_cu_masked;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
/* Relevant only for sdma queues*/
@@ -494,15 +520,18 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
+ uint64_t exception_status;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
(q).queue_address != 0 && \
(q).queue_percent > 0 && \
- !(q).is_evicted)
+ !(q).is_evicted && \
+ !(q).is_suspended)
enum mqd_update_flag {
- UPDATE_FLAG_CU_MASK = 0,
+ UPDATE_FLAG_DBG_WA_ENABLE = 1,
+ UPDATE_FLAG_DBG_WA_DISABLE = 2,
};
struct mqd_update_info {
@@ -563,7 +592,7 @@ struct queue {
unsigned int doorbell_id;
struct kfd_process *process;
- struct kfd_dev *device;
+ struct kfd_node *device;
void *gws;
/* procfs */
@@ -658,7 +687,10 @@ struct qcm_process_device {
uint64_t ib_base;
void *ib_kaddr;
- /* doorbell resources per process per device */
+ /* doorbells for kfd process */
+ struct amdgpu_bo *proc_doorbells;
+
+ /* bitmap for dynamic doorbell allocation from the bo */
unsigned long *doorbell_bitmap;
};
@@ -697,7 +729,7 @@ enum kfd_pdd_bound {
/* Data that is per-process-per device. */
struct kfd_process_device {
/* The device that owns this data. */
- struct kfd_dev *dev;
+ struct kfd_node *dev;
/* The process that owns this kfd_process_device. */
struct kfd_process *process;
@@ -748,7 +780,6 @@ struct kfd_process_device {
struct attribute attr_evict;
struct kobject *kobj_stats;
- unsigned int doorbell_index;
/*
* @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
@@ -783,6 +814,18 @@ struct kfd_process_device {
uint64_t faults;
uint64_t page_in;
uint64_t page_out;
+
+ /* Exception code status*/
+ uint64_t exception_status;
+ void *vm_fault_exc_data;
+ size_t vm_fault_exc_data_size;
+
+ /* Tracks debug per-vmid request settings */
+ uint32_t spi_dbg_override;
+ uint32_t spi_dbg_launch_mode;
+ uint32_t watch_points[4];
+ uint32_t alloc_watch_ids;
+
/*
* If this process has been checkpointed before, then the user
* application will use the original gpu_id on the
@@ -887,19 +930,57 @@ struct kfd_process {
*/
unsigned long last_restore_timestamp;
+ /* Indicates the device process is debug attached with a reserved vmid. */
+ bool debug_trap_enabled;
+
+ /* per-process-per-device debug event fd file */
+ struct file *dbg_ev_file;
+
+ /* If the process is a kfd debugger, we need to know so we can clean
+ * up at exit time. If a process enables debugging on itself, it does
+ * its own clean-up, so we don't set the flag here. We track this by
+ * counting the number of processes this process is debugging.
+ */
+ atomic_t debugged_process_count;
+
+ /* If the process is being debugged, this is the debugger process */
+ struct kfd_process *debugger_process;
+
/* Kobj for our procfs */
struct kobject *kobj;
struct kobject *kobj_queues;
struct attribute attr_pasid;
+ /* Keep track of CWSR init */
+ bool has_cwsr;
+
+ /* Exception code enable mask and status */
+ uint64_t exception_enable_mask;
+ uint64_t exception_status;
+
+ /* Used to drain stale interrupts */
+ wait_queue_head_t wait_irq_drain;
+ bool irq_drain_is_open;
+
/* shared virtual memory registered by this process */
struct svm_range_list svms;
bool xnack_enabled;
+ /* Work area for debugger event writer worker. */
+ struct work_struct debug_event_workarea;
+
+ /* Tracks debug per-vmid request for debug flags */
+ bool dbg_flags;
+
atomic_t poison;
/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
bool queues_paused;
+
+ /* Tracks runtime enable status */
+ struct semaphore runtime_enable_sema;
+ bool is_runtime_retry;
+ struct kfd_runtime_info runtime_info;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -925,20 +1006,19 @@ struct amdkfd_ioctl_desc {
unsigned int cmd_drv;
const char *name;
};
-bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+bool kfd_dev_is_large_bar(struct kfd_node *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
-struct kfd_process *kfd_create_process(struct file *filep);
+struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
-int kfd_process_gpuid_from_adev(struct kfd_process *p,
- struct amdgpu_device *adev, uint32_t *gpuid,
- uint32_t *gpuidx);
+int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
+ uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
uint32_t gpuidx, uint32_t *gpuid) {
return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
@@ -961,16 +1041,16 @@ int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct file *drm_file);
-struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
struct kfd_process *p);
-struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
struct kfd_process *p);
-struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p);
bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);
-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma);
/* KFD process API for creating and translating handles */
@@ -994,7 +1074,7 @@ void kfd_pasid_free(u32 pasid);
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
-int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off);
@@ -1007,15 +1087,15 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
- unsigned int *doorbell_index);
+ struct kfd_process_device *pdd);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
- unsigned int doorbell_index);
+ struct kfd_process_device *pdd);
/* GTT Sub-Allocator */
-int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
+int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
struct kfd_mem_obj **mem_obj);
-int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
+int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);
extern struct device *kfd_device;
@@ -1028,27 +1108,52 @@ void kfd_procfs_del_queue(struct queue *q);
/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
-int kfd_topology_add_device(struct kfd_dev *gpu);
-int kfd_topology_remove_device(struct kfd_dev *gpu);
+int kfd_topology_add_device(struct kfd_node *gpu);
+int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
-struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
-struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
-int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
+struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
+struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
+ uint32_t vmid)
+{
+ return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
+ (node->compute_vmid_bitmap & (1 << vmid)) != 0;
+}
+static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
+ uint32_t node_id, uint32_t vmid) {
+ struct kfd_dev *dev = adev->kfd.dev;
+ uint32_t i;
+
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+ return dev->nodes[0];
+
+ for (i = 0; i < dev->num_nodes; i++)
+ if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
+ return dev->nodes[i];
+
+ return NULL;
+}
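An illustrative user-space rendering of the kfd_irq_is_from_node() check above, with made-up bitmaps: an interrupt is attributed to a node only when both the source node_id and the VMID are set in that node's masks.

/* Illustrative only, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool irq_is_from_node(uint32_t interrupt_bitmap, uint32_t compute_vmid_bitmap,
			     uint32_t node_id, uint32_t vmid)
{
	return (interrupt_bitmap & (1u << node_id)) &&
	       (compute_vmid_bitmap & (1u << vmid));
}

int main(void)
{
	/* Assumed example: node owns XCC ids 0-1 and VMIDs 8-15. */
	uint32_t interrupt_bitmap = 0x3;
	uint32_t compute_vmid_bitmap = 0xff00;

	printf("node_id 1, vmid 9 -> %d\n",
	       irq_is_from_node(interrupt_bitmap, compute_vmid_bitmap, 1, 9));	/* 1 */
	printf("node_id 2, vmid 9 -> %d\n",
	       irq_is_from_node(interrupt_bitmap, compute_vmid_bitmap, 2, 9));	/* 0 */
	return 0;
}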
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
-void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
/* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev,
+#define KFD_IRQ_FENCE_CLIENTID 0xff
+#define KFD_IRQ_FENCE_SOURCEID 0xff
+#define KFD_IRQ_IS_FENCE(client, source) \
+ ((client) == KFD_IRQ_FENCE_CLIENTID && \
+ (source) == KFD_IRQ_FENCE_SOURCEID)
+int kfd_interrupt_init(struct kfd_node *dev);
+void kfd_interrupt_exit(struct kfd_node *dev);
+bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre, bool *flag);
+int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
+void kfd_process_close_interrupt_drain(unsigned int pasid);
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);
@@ -1056,6 +1161,11 @@ int kfd_init_apertures(struct kfd_process *process);
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
uint64_t tba_addr,
uint64_t tma_addr);
+void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
+ bool enabled);
+
+/* CWSR initialization */
+int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);
/* CRIU */
/*
@@ -1174,22 +1284,18 @@ void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
-struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
-struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
+ struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
- struct kfd_dev *dev);
-struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
+ struct kfd_node *dev);
+struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
-struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
+struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
@@ -1206,7 +1312,7 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
- struct kfd_dev *dev,
+ struct kfd_node *dev,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
@@ -1231,6 +1337,11 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
+int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
+ uint64_t exception_clear_mask,
+ void __user *buf,
+ int *num_qss_entries,
+ uint32_t *entry_size);
int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
uint64_t fence_value,
@@ -1270,6 +1381,8 @@ struct packet_manager_funcs {
int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset);
+ int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
+ uint32_t grace_period);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
@@ -1280,6 +1393,7 @@ struct packet_manager_funcs {
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
+ int set_grace_period_size;
int query_status_size;
int release_mem_size;
};
@@ -1302,6 +1416,8 @@ int pm_send_unmap_queue(struct packet_manager *pm,
void pm_release_ib(struct packet_manager *pm);
+int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
@@ -1310,6 +1426,8 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
+extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3;
+extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
extern const struct kfd_device_global_init_class device_global_init_class_cik;
@@ -1323,9 +1441,6 @@ int kfd_wait_on_events(struct kfd_process *p,
uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
-void kfd_signal_iommu_event(struct kfd_dev *dev,
- u32 pasid, unsigned long address,
- bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
@@ -1339,32 +1454,36 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
-void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
- struct kfd_vm_fault_info *info);
+void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+ struct kfd_vm_fault_info *info,
+ struct kfd_hsa_memory_exception_data *data);
-void kfd_signal_reset_event(struct kfd_dev *dev);
+void kfd_signal_reset_event(struct kfd_node *dev);
-void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
+void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
- dev->adev->sdma.instance[0].fw_version >= 18) ||
+ return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
+int kfd_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int queue_id,
+ uint64_t error_reason);
bool kfd_is_locked(void);
/* Compute profile */
-void kfd_inc_compute_active(struct kfd_dev *dev);
-void kfd_dec_compute_active(struct kfd_dev *dev);
+void kfd_inc_compute_active(struct kfd_node *dev);
+void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = adev_to_drm(kfd->adev);
@@ -1377,6 +1496,11 @@ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
#endif
}
+static inline bool kfd_is_first_node(struct kfd_node *node)
+{
+ return (node == node->kfd->nodes[0]);
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
@@ -1389,7 +1513,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);
-int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 07a9eaf9b7d8..fbf053001af9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -28,7 +28,6 @@
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
-#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
@@ -41,16 +40,16 @@ struct mm_struct;
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
-#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"
+#include "kfd_debug.h"
/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
-static DEFINE_MUTEX(kfd_processes_mutex);
+DEFINE_MUTEX(kfd_processes_mutex);
DEFINE_SRCU(kfd_processes_srcu);
@@ -69,7 +68,6 @@ static struct kfd_process *find_process(const struct task_struct *thread,
bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
@@ -269,7 +267,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
int cu_cnt;
int wave_cnt;
int max_waves_per_cu;
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
@@ -290,7 +288,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
wave_cnt = 0;
max_waves_per_cu = 0;
dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
- &max_waves_per_cu);
+ &max_waves_per_cu, 0);
/* Translate wave count to number of compute units */
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
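For reference, the wave-to-CU translation above is a plain round-up division; the standalone sketch below (values invented) shows the same arithmetic, independent of the new per-instance argument passed to get_cu_occupancy().

#include <assert.h>
#include <stdint.h>

/* Round-up integer division, as used to translate waves to CUs above. */
static uint32_t waves_to_cus(uint32_t wave_cnt, uint32_t max_waves_per_cu)
{
	return (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
}

int main(void)
{
	assert(waves_to_cus(0, 32) == 0);   /* idle device maps to zero CUs */
	assert(waves_to_cus(1, 32) == 1);   /* any residual wave still costs a CU */
	assert(waves_to_cus(64, 32) == 2);  /* exact multiples divide evenly */
	return 0;
}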
@@ -691,7 +689,7 @@ void kfd_process_destroy_wq(void)
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_process_device *pdd, void **kptr)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
if (kptr && *kptr) {
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
@@ -713,7 +711,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
uint64_t gpu_va, uint32_t size,
uint32_t flags, struct kgd_mem **mem, void **kptr)
{
- struct kfd_dev *kdev = pdd->dev;
+ struct kfd_node *kdev = pdd->dev;
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
@@ -798,18 +796,19 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}
-struct kfd_process *kfd_create_process(struct file *filep)
+struct kfd_process *kfd_create_process(struct task_struct *thread)
{
struct kfd_process *process;
- struct task_struct *thread = current;
int ret;
- if (!thread->mm)
+ if (!(thread->mm && mmget_not_zero(thread->mm)))
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
- if (thread->group_leader->mm != thread->mm)
+ if (thread->group_leader->mm != thread->mm) {
+ mmput(thread->mm);
return ERR_PTR(-EINVAL);
+ }
/*
* take kfd processes mutex before starting of process creation
@@ -818,6 +817,12 @@ struct kfd_process *kfd_create_process(struct file *filep)
*/
mutex_lock(&kfd_processes_mutex);
+ if (kfd_is_locked()) {
+ mutex_unlock(&kfd_processes_mutex);
+ pr_debug("KFD is locked! Cannot create process");
+ return ERR_PTR(-EINVAL);
+ }
+
/* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread, false);
if (process) {
@@ -827,10 +832,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (IS_ERR(process))
goto out;
- ret = kfd_process_init_cwsr_apu(process, filep);
- if (ret)
- goto out_destroy;
-
if (!procfs.kobj)
goto out;
@@ -859,21 +860,16 @@ struct kfd_process *kfd_create_process(struct file *filep)
kfd_procfs_add_sysfs_stats(process);
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+
+ init_waitqueue_head(&process->wait_irq_drain);
}
out:
if (!IS_ERR(process))
kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
+ mmput(thread->mm);
return process;
-
-out_destroy:
- hash_del_rcu(&process->kfd_processes);
- mutex_unlock(&kfd_processes_mutex);
- synchronize_srcu(&kfd_processes_srcu);
- /* kfd_process_free_notifier will trigger the cleanup */
- mmu_notifier_put(&process->mmu_notifier);
- return ERR_PTR(ret);
}
struct kfd_process *kfd_get_process(const struct task_struct *thread)
@@ -982,7 +978,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
struct kfd_process_device *pdd;
- struct kfd_dev *kdev;
+ struct kfd_node *kdev;
void *mem;
kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
@@ -1037,12 +1033,11 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
get_order(KFD_CWSR_TBA_TMA_SIZE));
- bitmap_free(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
- kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
+ kfd_free_process_doorbells(pdd->dev->kfd, pdd);
- if (pdd->dev->shared_resources.enable_mes)
+ if (pdd->dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
pdd->proc_ctx_bo);
/*
@@ -1125,7 +1120,6 @@ static void kfd_process_wq_release(struct work_struct *work)
dma_fence_signal(p->ef);
kfd_process_remove_sysfs(p);
- kfd_iommu_unbind_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1169,11 +1163,40 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
+ int i;
+
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
+ if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
+ amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
+ }
+
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
+ kfd_dbg_trap_disable(p);
+
+ if (atomic_read(&p->debugged_process_count) > 0) {
+ struct kfd_process *target;
+ unsigned int temp;
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
+ if (target->debugger_process && target->debugger_process == p) {
+ mutex_lock_nested(&target->mutex, 1);
+ kfd_dbg_trap_disable(target);
+ mutex_unlock(&target->mutex);
+ if (atomic_read(&p->debugged_process_count) == 0)
+ break;
+ }
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ }
mmu_notifier_put(&p->mmu_notifier);
}
@@ -1253,16 +1276,19 @@ void kfd_cleanup_processes(void)
mmu_notifier_synchronize();
}
-static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
int i;
+ if (p->has_cwsr)
+ return 0;
+
for (i = 0; i < p->n_pdds; i++) {
- struct kfd_dev *dev = p->pdds[i]->dev;
+ struct kfd_node *dev = p->pdds[i]->dev;
struct qcm_process_device *qpd = &p->pdds[i]->qpd;
- if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
@@ -1279,19 +1305,23 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
return err;
}
- memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+ memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
+
+ kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
}
+ p->has_cwsr = true;
+
return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
@@ -1300,7 +1330,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
void *kaddr;
int ret;
- if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
return 0;
/* cwsr_base is only set for dGPU */
@@ -1313,7 +1343,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
- memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+ memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
+
+ kfd_process_set_trap_debug_flag(&pdd->qpd,
+ pdd->process->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
@@ -1324,10 +1357,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
+ if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
return;
kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
@@ -1371,7 +1404,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* support retry.
*/
for (i = 0; i < p->n_pdds; i++) {
- struct kfd_dev *dev = p->pdds[i]->dev;
+ struct kfd_node *dev = p->pdds[i]->dev;
/* Only consider GFXv9 and higher GPUs. Older GPUs don't
* support the SVM APIs and don't need to be considered
@@ -1394,13 +1427,23 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
return false;
- if (dev->noretry)
+ if (dev->kfd->noretry)
return false;
}
return true;
}
+void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
+ bool enabled)
+{
+ if (qpd->cwsr_kaddr) {
+ uint64_t *tma =
+ (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+ tma[2] = enabled;
+ }
+}
+
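The new kfd_process_set_trap_debug_flag() publishes the debug-trap state to the trap handler through the third 64-bit word of the TMA. A minimal standalone sketch of that layout follows; the 0x2000 offset is a stand-in, not the real KFD_CWSR_TMA_OFFSET.

#include <stdint.h>
#include <assert.h>

/* Stand-in for KFD_CWSR_TMA_OFFSET; the real value is an assumption here. */
#define TMA_OFFSET 0x2000

/* Mirrors the TMA write above: the trap handler consumes a 64-bit
 * "debug trap enabled" flag stored in the third u64 of the TMA. */
static void set_trap_debug_flag(void *cwsr_kaddr, int enabled)
{
	uint64_t *tma = (uint64_t *)((char *)cwsr_kaddr + TMA_OFFSET);

	tma[2] = enabled;
}

int main(void)
{
	static uint64_t cwsr_area[TMA_OFFSET / sizeof(uint64_t) + 4];

	set_trap_debug_flag(cwsr_area, 1);
	assert(cwsr_area[TMA_OFFSET / sizeof(uint64_t) + 2] == 1);
	return 0;
}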
/*
* On return the kfd_process is fully operational and will be freed when the
* mm is released
@@ -1428,6 +1471,11 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err)
goto err_event_init;
process->is_32bit_user_mode = in_compat_syscall();
+ process->debug_trap_enabled = false;
+ process->debugger_process = NULL;
+ process->exception_enable_mask = 0;
+ atomic_set(&process->debugged_process_count, 0);
+ sema_init(&process->runtime_enable_sema, 0);
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0) {
@@ -1475,6 +1523,8 @@ static struct kfd_process *create_process(const struct task_struct *thread)
kfd_unref_process(process);
get_task_struct(process->lead_thread);
+ INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
+
return process;
err_register_notifier:
@@ -1496,39 +1546,7 @@ err_alloc_process:
return ERR_PTR(err);
}
-static int init_doorbell_bitmap(struct qcm_process_device *qpd,
- struct kfd_dev *dev)
-{
- unsigned int i;
- int range_start = dev->shared_resources.non_cp_doorbells_start;
- int range_end = dev->shared_resources.non_cp_doorbells_end;
-
- if (!KFD_IS_SOC15(dev))
- return 0;
-
- qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
- GFP_KERNEL);
- if (!qpd->doorbell_bitmap)
- return -ENOMEM;
-
- /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
- pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
- pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
- range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
- range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
-
- for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= range_start && i <= range_end) {
- __set_bit(i, qpd->doorbell_bitmap);
- __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
- qpd->doorbell_bitmap);
- }
- }
-
- return 0;
-}
-
-struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
int i;
@@ -1540,7 +1558,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
return NULL;
}
-struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
@@ -1552,11 +1570,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
- if (init_doorbell_bitmap(&pdd->qpd, dev)) {
- pr_err("Failed to init doorbell for process\n");
- goto err_free_pdd;
- }
-
pdd->dev = dev;
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
@@ -1573,7 +1586,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
@@ -1588,6 +1601,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
}
p->pdds[p->n_pdds++] = pdd;
+ if (kfd_dbg_is_per_vmid_supported(pdd->dev))
+ pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
+ pdd->dev->adev,
+ false,
+ 0);
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
@@ -1619,7 +1637,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
struct kfd_process *p;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int ret;
if (!drm_file)
@@ -1679,7 +1697,7 @@ err_reserve_ib_mem:
*
* Assumes that the process lock is held.
*/
-struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
+struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd;
@@ -1707,10 +1725,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
}
}
- err = kfd_iommu_bind_process_to_device(pdd);
- if (err)
- goto out;
-
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
@@ -1718,15 +1732,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
pdd->runtime_inuse = true;
return pdd;
-
-out:
- /* balance runpm reference count and exit with error */
- if (!pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
- }
-
- return ERR_PTR(err);
}
/* Create specific handle mapped to mem from process local memory idr
@@ -1885,13 +1890,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
}
int
-kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
- uint32_t *gpuid, uint32_t *gpuidx)
+kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
+ uint32_t *gpuid, uint32_t *gpuidx)
{
int i;
for (i = 0; i < p->n_pdds; i++)
- if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
+ if (p->pdds[i] && p->pdds[i]->dev == node) {
*gpuid = p->pdds[i]->user_gpu_id;
*gpuidx = i;
return 0;
@@ -1961,8 +1966,10 @@ static void restore_process_worker(struct work_struct *work)
*/
p->last_restore_timestamp = get_jiffies_64();
- ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
- &p->ef);
+ /* VMs may not have been acquired yet during debugging. */
+ if (p->kgd_process_info)
+ ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
+ &p->ef);
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
@@ -1988,7 +1995,7 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
cancel_delayed_work_sync(&p->eviction_work);
- cancel_delayed_work_sync(&p->restore_work);
+ flush_delayed_work(&p->restore_work);
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
pr_err("Failed to suspend process 0x%x\n", p->pasid);
@@ -2016,7 +2023,7 @@ int kfd_resume_all_processes(void)
return ret;
}
-int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
+int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
struct kfd_process_device *pdd;
@@ -2051,7 +2058,9 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
+ uint32_t xcc_mask = dev->xcc_mask;
+ int xcc = 0;
/*
* It can be that we race and lose here, but that is extremely unlikely
@@ -2069,9 +2078,133 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
pdd->qpd.vmid);
} else {
- amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
- pdd->process->pasid, type);
+ for_each_inst(xcc, xcc_mask)
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(
+ dev->adev, pdd->process->pasid, type, xcc);
+ }
+}
+
+/* assumes caller holds process lock. */
+int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
+{
+ uint32_t irq_drain_fence[8];
+ uint8_t node_id = 0;
+ int r = 0;
+
+ if (!KFD_IS_SOC15(pdd->dev))
+ return 0;
+
+ pdd->process->irq_drain_is_open = true;
+
+ memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
+ irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
+ KFD_IRQ_FENCE_CLIENTID;
+ irq_drain_fence[3] = pdd->process->pasid;
+
+ /*
+ * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
+ */
+ if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
+ node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
+ irq_drain_fence[3] |= node_id << 16;
+ }
+
+ /* Ensure stale IRQs have been scheduled as KFD interrupts, then send the drain fence. */
+ if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
+ irq_drain_fence)) {
+ pdd->process->irq_drain_is_open = false;
+ return 0;
}
+
+ r = wait_event_interruptible(pdd->process->wait_irq_drain,
+ !READ_ONCE(pdd->process->irq_drain_is_open));
+ if (r)
+ pdd->process->irq_drain_is_open = false;
+
+ return r;
+}
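The drain fence built above packs the source/client IDs into DW[0] and the PASID (plus, on GFX 9.4.3, the node ID) into DW[3]. The sketch below mirrors that packing with placeholder ID values rather than the real KFD_IRQ_FENCE_* constants.

#include <stdint.h>
#include <stdio.h>

#define FENCE_SOURCEID 0x96   /* placeholder, not the real KFD constant */
#define FENCE_CLIENTID 0x06   /* placeholder, not the real KFD constant */

/* Illustrative packing of an 8-dword drain-fence IH cookie. */
static void build_drain_fence(uint32_t cookie[8], uint32_t pasid,
			      uint8_t node_id, int is_gfx_9_4_3)
{
	for (int i = 0; i < 8; i++)
		cookie[i] = 0;

	cookie[0] = (FENCE_SOURCEID << 8) | FENCE_CLIENTID;
	cookie[3] = pasid;
	if (is_gfx_9_4_3)
		cookie[3] |= (uint32_t)node_id << 16;   /* NodeId rides in DW[3] bits 16+ */
}

int main(void)
{
	uint32_t cookie[8];

	build_drain_fence(cookie, 0x8001, 2, 1);
	printf("DW0=0x%08x DW3=0x%08x\n", cookie[0], cookie[3]);
	return 0;
}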
+
+void kfd_process_close_interrupt_drain(unsigned int pasid)
+{
+ struct kfd_process *p;
+
+ p = kfd_lookup_process_by_pasid(pasid);
+
+ if (!p)
+ return;
+
+ WRITE_ONCE(p->irq_drain_is_open, false);
+ wake_up_all(&p->wait_irq_drain);
+ kfd_unref_process(p);
+}
+
+struct send_exception_work_handler_workarea {
+ struct work_struct work;
+ struct kfd_process *p;
+ unsigned int queue_id;
+ uint64_t error_reason;
+};
+
+static void send_exception_work_handler(struct work_struct *work)
+{
+ struct send_exception_work_handler_workarea *workarea;
+ struct kfd_process *p;
+ struct queue *q;
+ struct mm_struct *mm;
+ struct kfd_context_save_area_header __user *csa_header;
+ uint64_t __user *err_payload_ptr;
+ uint64_t cur_err;
+ uint32_t ev_id;
+
+ workarea = container_of(work,
+ struct send_exception_work_handler_workarea,
+ work);
+ p = workarea->p;
+
+ mm = get_task_mm(p->lead_thread);
+
+ if (!mm)
+ return;
+
+ kthread_use_mm(mm);
+
+ q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
+
+ if (!q)
+ goto out;
+
+ csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
+
+ get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
+ get_user(cur_err, err_payload_ptr);
+ cur_err |= workarea->error_reason;
+ put_user(cur_err, err_payload_ptr);
+ get_user(ev_id, &csa_header->err_event_id);
+
+ kfd_set_event(p, ev_id);
+
+out:
+ kthread_unuse_mm(mm);
+ mmput(mm);
+}
+
+int kfd_send_exception_to_runtime(struct kfd_process *p,
+ unsigned int queue_id,
+ uint64_t error_reason)
+{
+ struct send_exception_work_handler_workarea worker;
+
+ INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
+
+ worker.p = p;
+ worker.queue_id = queue_id;
+ worker.error_reason = error_reason;
+
+ schedule_work(&worker.work);
+ flush_work(&worker.work);
+ destroy_work_on_stack(&worker.work);
+
+ return 0;
}
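kfd_send_exception_to_runtime() runs its update from a short-lived, on-stack work item so it can attach to the target process's mm before touching user memory. A minimal sketch of that pattern, with an illustrative payload struct and no KFD specifics, might look like:

#include <linux/workqueue.h>
#include <linux/sched/mm.h>
#include <linux/kthread.h>

/* Illustrative job descriptor; only the work_struct member is required. */
struct onstack_job {
	struct work_struct work;
	struct task_struct *task;
	u64 payload;
};

static void onstack_job_fn(struct work_struct *work)
{
	struct onstack_job *job = container_of(work, struct onstack_job, work);
	struct mm_struct *mm = get_task_mm(job->task);

	if (!mm)
		return;

	kthread_use_mm(mm);     /* user pointers of @task are valid from here */
	/* ... get_user()/put_user() of job->payload against the task's mm ... */
	kthread_unuse_mm(mm);
	mmput(mm);
}

static int run_onstack_job(struct task_struct *task, u64 payload)
{
	struct onstack_job job = { .task = task, .payload = payload };

	INIT_WORK_ONSTACK(&job.work, onstack_job_fn);
	schedule_work(&job.work);
	flush_work(&job.work);        /* must finish before the stack frame goes away */
	destroy_work_on_stack(&job.work);
	return 0;
}

Because the work item lives on the caller's stack, the flush must complete before returning, which is why the KFD code pairs schedule_work() with flush_work() and destroy_work_on_stack().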
struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
@@ -2133,4 +2266,3 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
}
#endif
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 4236539d9f93..adb5e4bdc0b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -81,7 +81,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
- struct kfd_dev *dev = pdd->dev;
+ struct kfd_node *dev = pdd->dev;
if (pdd->already_dequeued)
return;
@@ -93,7 +93,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
- struct kfd_dev *dev = NULL;
+ struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
struct kgd_mem *mem = NULL;
@@ -123,16 +123,26 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;
- if (gws)
- ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
- gws, &mem);
- else
- ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
- pqn->q->gws);
- if (unlikely(ret))
- return ret;
+ if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) && !dev->kfd->shared_resources.enable_mes) {
+ if (gws)
+ ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
+ gws, &mem);
+ else
+ ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
+ pqn->q->gws);
+ if (unlikely(ret))
+ return ret;
+ pqn->q->gws = mem;
+ } else {
+ /*
+ * Intentionally set GWS to a non-NULL value
+ * for devices that do not use GWS for global wave
+ * synchronization but require the formality
+ * of setting GWS for cooperative groups.
+ */
+ pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
+ }
- pqn->q->gws = mem;
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
@@ -164,7 +174,9 @@ void pqm_uninit(struct process_queue_manager *pqm)
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- if (pqn->q && pqn->q->gws)
+ if (pqn->q && pqn->q->gws &&
+ KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+ !pqn->q->device->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
kfd_procfs_del_queue(pqn->q);
@@ -178,7 +190,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
}
static int init_user_queue(struct process_queue_manager *pqm,
- struct kfd_dev *dev, struct queue **q,
+ struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, struct amdgpu_bo *wptr_bo,
unsigned int qid)
@@ -187,6 +199,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
/* Doorbell initialized in user space*/
q_properties->doorbell_ptr = NULL;
+ q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);
/* let DQM handle it*/
q_properties->vmid = 0;
@@ -199,7 +212,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
(*q)->device = dev;
(*q)->process = pqm->process;
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_GANG_CTX_SIZE,
&(*q)->gang_ctx_bo,
@@ -224,7 +237,7 @@ cleanup:
}
int pqm_create_queue(struct process_queue_manager *pqm,
- struct kfd_dev *dev,
+ struct kfd_node *dev,
struct file *f,
struct queue_properties *properties,
unsigned int *qid,
@@ -242,6 +255,13 @@ int pqm_create_queue(struct process_queue_manager *pqm,
enum kfd_queue_type type = properties->type;
unsigned int max_queues = 127; /* HWS limit */
+ /*
+ * On GFX 9.4.3, increase the number of queues that
+ * can be created to 255. No HWS limit on GFX 9.4.3.
+ */
+ if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))
+ max_queues = 255;
+
q = NULL;
kq = NULL;
@@ -258,7 +278,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* Hence we also check the type as well
*/
if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
- max_queues = dev->device_info.max_no_of_hqd/2;
+ max_queues = dev->kfd->device_info.max_no_of_hqd/2;
if (pdd->qpd.queue_count >= max_queues)
return -ENOSPC;
@@ -330,6 +350,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq->queue->properties.queue_id = *qid;
pqn->kq = kq;
pqn->q = NULL;
+ retval = kfd_process_drain_interrupts(pdd);
+ if (retval)
+ break;
+
retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
kq, &pdd->qpd);
break;
@@ -344,17 +368,20 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- if (q && p_doorbell_offset_in_process)
+ if (q && p_doorbell_offset_in_process) {
/* Return the doorbell offset within the doorbell page
* to the caller so it can be passed up to user mode
* (in bytes).
- * There are always 1024 doorbells per process, so in case
- * of 8-byte doorbells, there are two doorbell pages per
- * process.
+ * relative doorbell index = Absolute doorbell index -
+ * absolute index of first doorbell in the page.
*/
- *p_doorbell_offset_in_process =
- (q->properties.doorbell_off * sizeof(uint32_t)) &
- (kfd_doorbell_process_slice(dev) - 1);
+ uint32_t first_db_index = amdgpu_doorbell_index_on_bar(pdd->dev->adev,
+ pdd->qpd.proc_doorbells,
+ 0);
+
+ *p_doorbell_offset_in_process = (q->properties.doorbell_off
+ - first_db_index) * sizeof(uint32_t);
+ }
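With doorbells now allocated through the amdgpu doorbell manager, the offset reported to user space is relative to the first doorbell of the process page rather than derived from a fixed 1024-doorbell slice. A tiny example of the arithmetic (index values invented):

#include <stdint.h>
#include <assert.h>

/* Relative doorbell byte offset = (absolute index - first index in page) * 4. */
static uint32_t doorbell_offset_in_process(uint32_t absolute_index,
					   uint32_t first_index_in_page)
{
	return (uint32_t)((absolute_index - first_index_in_page) * sizeof(uint32_t));
}

int main(void)
{
	/* e.g. queue doorbell at absolute index 0x408, process page starts at 0x400 */
	assert(doorbell_offset_in_process(0x408, 0x400) == 0x20);
	return 0;
}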
pr_debug("PQM After DQM create queue\n");
@@ -387,7 +414,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
struct device_queue_manager *dqm;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
int retval;
dqm = NULL;
@@ -434,12 +461,15 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q->gws) {
- amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
- pqn->q->gws);
+ if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+ !dev->kfd->shared_resources.enable_mes)
+ amdgpu_amdkfd_remove_gws_from_process(
+ pqm->process->kgd_process_info,
+ pqn->q->gws);
pdd->qpd.num_gws = 0;
}
- if (dev->shared_resources.enable_mes) {
+ if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev,
pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
@@ -477,6 +507,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
pqn->q->properties.queue_size = p->queue_size;
pqn->q->properties.queue_percent = p->queue_percent;
pqn->q->properties.priority = p->priority;
+ pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q, NULL);
@@ -498,8 +529,12 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
return -EFAULT;
}
+ /* CUs are masked for debugger requirements so deny user mask */
+ if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
+ return -EBUSY;
+
/* ASICs that have WGPs must enforce pairwise enabled mask checks. */
- if (minfo && minfo->update_flag == UPDATE_FLAG_CU_MASK && minfo->cu_mask.ptr &&
+ if (minfo && minfo->cu_mask.ptr &&
KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
int i;
@@ -518,6 +553,9 @@ int pqm_update_mqd(struct process_queue_manager *pqm,
if (retval != 0)
return retval;
+ if (minfo && minfo->cu_mask.ptr)
+ pqn->q->properties.is_user_cu_masked = true;
+
return 0;
}
@@ -565,6 +603,46 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
save_area_used_size);
}
+int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
+ uint64_t exception_clear_mask,
+ void __user *buf,
+ int *num_qss_entries,
+ uint32_t *entry_size)
+{
+ struct process_queue_node *pqn;
+ struct kfd_queue_snapshot_entry src;
+ uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
+ int r = 0;
+
+ *num_qss_entries = 0;
+ if (!(*entry_size))
+ return -EINVAL;
+
+ *entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
+ mutex_lock(&pqm->process->event_mutex);
+
+ memset(&src, 0, sizeof(src));
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (!pqn->q)
+ continue;
+
+ if (*num_qss_entries < tmp_qss_entries) {
+ set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
+
+ if (copy_to_user(buf, &src, *entry_size)) {
+ r = -EFAULT;
+ break;
+ }
+ buf += tmp_entry_size;
+ }
+ *num_qss_entries += 1;
+ }
+
+ mutex_unlock(&pqm->process->event_mutex);
+ return r;
+}
+
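pqm_get_queue_snapshot() follows a size-negotiated snapshot ABI: the caller advertises its per-entry size and capacity, the kernel copies min(user size, kernel size) bytes per entry, and the total queue count is returned so truncation can be detected. A userspace sketch of the same pattern, with an invented entry layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct snapshot_entry_v1 { uint32_t queue_id; uint32_t flags; };

/* Fill at most `capacity` entries, copying min(caller size, producer size)
 * bytes each while striding the buffer by the caller's size; return the
 * total number of queues so the caller can detect truncation. */
static int take_snapshot(void *buf, uint32_t capacity, uint32_t *entry_size,
			 const struct snapshot_entry_v1 *queues, uint32_t n_queues)
{
	uint32_t copy_size = *entry_size < sizeof(struct snapshot_entry_v1) ?
			     *entry_size : sizeof(struct snapshot_entry_v1);
	unsigned char *dst = buf;

	for (uint32_t i = 0; i < n_queues && i < capacity; i++) {
		memcpy(dst, &queues[i], copy_size);
		dst += *entry_size;          /* stride stays the caller's size */
	}
	*entry_size = copy_size;
	return n_queues;                     /* may exceed capacity: truncated */
}

int main(void)
{
	struct snapshot_entry_v1 queues[3] = { {1, 0}, {2, 0}, {3, 0} };
	struct snapshot_entry_v1 out[2];
	uint32_t entry_size = sizeof(out[0]);
	int total = take_snapshot(out, 2, &entry_size, queues, 3);

	printf("returned %d entries of %u bytes, buffer held 2\n", total, entry_size);
	return 0;
}

A caller compares the returned count against the capacity it passed in and, if larger, re-issues the call with a bigger buffer.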
static int get_queue_data_sizes(struct kfd_process_device *pdd,
struct queue *q,
uint32_t *mqd_size,
@@ -858,12 +936,6 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- if (!pdd->doorbell_index &&
- kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
- ret = -ENOMEM;
- goto exit;
- }
-
/* data stored in this order: mqd, ctl_stack */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
@@ -927,7 +999,9 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
struct mqd_manager *mqd_mgr;
- int r = 0;
+ int r = 0, xcc, num_xccs = 1;
+ void *mqd;
+ uint64_t size = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
if (pqn->q) {
@@ -943,6 +1017,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
seq_printf(m, " Compute queue on device %x\n",
q->device->id);
mqd_type = KFD_MQD_TYPE_CP;
+ num_xccs = NUM_XCC(q->device->xcc_mask);
break;
default:
seq_printf(m,
@@ -951,6 +1026,8 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
+ size = mqd_mgr->mqd_stride(mqd_mgr,
+ &q->properties);
} else if (pqn->kq) {
q = pqn->kq->queue;
mqd_mgr = pqn->kq->mqd_mgr;
@@ -972,9 +1049,12 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
- r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
- if (r != 0)
- break;
+ for (xcc = 0; xcc < num_xccs; xcc++) {
+ mqd = q->mqd + size * xcc;
+ r = mqd_mgr->debugfs_show_mqd(m, mqd);
+ if (r != 0)
+ break;
+ }
}
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 0472b56de245..d9953c2b2661 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -36,7 +36,7 @@ struct kfd_smi_client {
wait_queue_head_t wait_queue;
/* events enabled */
uint64_t events;
- struct kfd_dev *dev;
+ struct kfd_node *dev;
spinlock_t lock;
struct rcu_head rcu;
pid_t pid;
@@ -149,7 +149,7 @@ static void kfd_smi_ev_client_free(struct rcu_head *p)
static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
{
struct kfd_smi_client *client = filep->private_data;
- struct kfd_dev *dev = client->dev;
+ struct kfd_node *dev = client->dev;
spin_lock(&dev->smi_lock);
list_del_rcu(&client->list);
@@ -171,7 +171,7 @@ static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
}
-static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev,
+static void add_event_to_kfifo(pid_t pid, struct kfd_node *dev,
unsigned int smi_event, char *event_msg, int len)
{
struct kfd_smi_client *client;
@@ -196,7 +196,7 @@ static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev,
}
__printf(4, 5)
-static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev,
+static void kfd_smi_event_add(pid_t pid, struct kfd_node *dev,
unsigned int event, char *fmt, ...)
{
char fifo_in[KFD_SMI_EVENT_MSG_SIZE];
@@ -215,7 +215,7 @@ static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev,
add_event_to_kfifo(pid, dev, event, fifo_in, len);
}
-void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
+void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset)
{
unsigned int event;
@@ -228,7 +228,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
kfd_smi_event_add(0, dev, event, "%x\n", dev->reset_seq_num);
}
-void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
+void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask)
{
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n",
@@ -236,7 +236,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
}
-void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
+void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
{
struct amdgpu_task_info task_info;
@@ -250,58 +250,58 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
task_info.pid, task_info.task_name);
}
-void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
unsigned long address, bool write_fault,
ktime_t ts)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_START,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
"%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
- address, dev->id, write_fault ? 'W' : 'R');
+ address, node->id, write_fault ? 'W' : 'R');
}
-void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_END,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
"%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
- pid, address, dev->id, migration ? 'M' : 'U');
+ pid, address, node->id, migration ? 'M' : 'U');
}
-void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to,
uint32_t prefetch_loc, uint32_t preferred_loc,
uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_START,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
"%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
ktime_get_boottime_ns(), pid, start, end - start,
from, to, prefetch_loc, preferred_loc, trigger);
}
-void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to, uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_END,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
"%lld -%d @%lx(%lx) %x->%x %d\n",
ktime_get_boottime_ns(), pid, start, end - start,
from, to, trigger);
}
-void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_EVICTION,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
"%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
- dev->id, trigger);
+ node->id, trigger);
}
-void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid)
+void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_RESTORE,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
"%lld -%d %x\n", ktime_get_boottime_ns(), pid,
- dev->id);
+ node->id);
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
@@ -324,16 +324,16 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
kfd_unref_process(p);
}
-void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger)
{
- kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_UNMAP_FROM_GPU,
+ kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
"%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
- pid, address, last - address + 1, dev->id, trigger);
+ pid, address, last - address + 1, node->id, trigger);
}
-int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
+int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
int ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 76fe4e0ec2d2..fa95c2dfd587 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -24,29 +24,29 @@
#ifndef KFD_SMI_EVENTS_H_INCLUDED
#define KFD_SMI_EVENTS_H_INCLUDED
-int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd);
-void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid);
-void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
+int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd);
+void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid);
+void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask);
-void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset);
-void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset);
+void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
unsigned long address, bool write_fault,
ktime_t ts);
-void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration);
-void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to,
uint32_t prefetch_loc, uint32_t preferred_loc,
uint32_t trigger);
-void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to, uint32_t trigger);
-void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger);
-void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid);
+void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
-void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid,
+void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 96a138a39515..dbbe6559d6bc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -23,7 +23,10 @@
#include <linux/types.h>
#include <linux/sched/task.h>
+#include <linux/dynamic_debug.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
+
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -46,6 +49,13 @@
* page table is updated.
*/
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
+#define dynamic_svm_range_dump(svms) \
+ _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
+#else
+#define dynamic_svm_range_dump(svms) \
+ do { if (0) svm_range_debug_dump(svms); } while (0)
+#endif
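When CONFIG_DYNAMIC_DEBUG is off, the fallback macro keeps svm_range_debug_dump() type-checked but compiled out via the usual `if (0)` idiom. The small standalone example below (names invented) shows the same trick:

#include <stdio.h>

static void debug_dump(const char *what)
{
	printf("dumping %s\n", what);
}

/* The "do { if (0) fn(args); } while (0)" form still type-checks the
 * arguments and keeps fn() referenced, but dead-code elimination removes
 * the call entirely when the debug knob is disabled. */
#ifdef VERBOSE_DEBUG
#define maybe_dump(what) debug_dump(what)
#else
#define maybe_dump(what) do { if (0) debug_dump(what); } while (0)
#endif

int main(void)
{
	maybe_dump("svm ranges");   /* compiles either way; only runs with VERBOSE_DEBUG */
	return 0;
}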
/* Giant svm range split into smaller ranges based on this, it is decided using
* minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to
@@ -170,12 +180,11 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
page = hmm_pfn_to_page(hmm_pfns[i]);
if (is_zone_device_page(page)) {
- struct amdgpu_device *bo_adev =
- amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+ struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
bo_adev->vm_manager.vram_base_offset -
- bo_adev->kfd.dev->pgmap.range.start;
+ bo_adev->kfd.pgmap.range.start;
addr[i] |= SVM_RANGE_VRAM_DOMAIN;
pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
continue;
@@ -240,7 +249,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
}
}
-void svm_range_free_dma_mappings(struct svm_range *prange)
+void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
{
struct kfd_process_device *pdd;
dma_addr_t *dma_addr;
@@ -261,13 +270,14 @@ void svm_range_free_dma_mappings(struct svm_range *prange)
continue;
}
dev = &pdd->dev->adev->pdev->dev;
- svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
+ if (unmap_dma)
+ svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
kvfree(dma_addr);
prange->dma_addr[gpuidx] = NULL;
}
}
-static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
+static void svm_range_free(struct svm_range *prange, bool do_unmap)
{
uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
@@ -276,12 +286,12 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
prange->start, prange->last);
svm_range_vram_node_free(prange);
- svm_range_free_dma_mappings(prange);
+ svm_range_free_dma_mappings(prange, do_unmap);
- if (update_mem_usage && !p->xnack_enabled) {
+ if (do_unmap && !p->xnack_enabled) {
pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
}
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
@@ -314,7 +324,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
p = container_of(svms, struct kfd_process, svms);
if (!p->xnack_enabled && update_mem_usage &&
amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
pr_info("SVM mapping failed, exceeds resident system memory limit\n");
kfree(prange);
return NULL;
@@ -424,10 +434,8 @@ static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
}
static bool
-svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
+svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
{
- struct amdgpu_device *bo_adev;
-
mutex_lock(&prange->lock);
if (!prange->svm_bo) {
mutex_unlock(&prange->lock);
@@ -440,12 +448,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
}
if (svm_bo_ref_unless_zero(prange->svm_bo)) {
/*
- * Migrate from GPU to GPU, remove range from source bo_adev
- * svm_bo range list, and return false to allocate svm_bo from
- * destination adev.
+ * Migrate from GPU to GPU, remove range from source svm_bo->node
+ * range list, and return false to allocate svm_bo from destination
+ * node.
*/
- bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
- if (bo_adev != adev) {
+ if (prange->svm_bo->node != node) {
mutex_unlock(&prange->lock);
spin_lock(&prange->svm_bo->list_lock);
@@ -513,7 +520,7 @@ static struct svm_range_bo *svm_range_bo_new(void)
}
int
-svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
+svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
struct amdgpu_bo_param bp;
@@ -528,7 +535,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
prange->start, prange->last);
- if (svm_range_validate_svm_bo(adev, prange))
+ if (svm_range_validate_svm_bo(node, prange))
return 0;
svm_bo = svm_range_bo_new();
@@ -542,6 +549,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
kfree(svm_bo);
return -ESRCH;
}
+ svm_bo->node = node;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
@@ -558,13 +566,20 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
+ if (node->xcp)
+ bp.xcp_id_plus1 = node->xcp->id + 1;
- r = amdgpu_bo_create_user(adev, &bp, &ubo);
+ r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
if (r) {
pr_debug("failed %d to create bo\n", r);
goto create_bo_failed;
}
bo = &ubo->bo;
+
+ pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
+ bo->tbo.resource->start << PAGE_SHIFT, bp.size,
+ bp.xcp_id_plus1 - 1);
+
r = amdgpu_bo_reserve(bo, true);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
@@ -617,45 +632,30 @@ void svm_range_vram_node_free(struct svm_range *prange)
prange->ttm_res = NULL;
}
-struct amdgpu_device *
-svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
+struct kfd_node *
+svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
{
- struct kfd_process_device *pdd;
struct kfd_process *p;
- int32_t gpu_idx;
+ struct kfd_process_device *pdd;
p = container_of(prange->svms, struct kfd_process, svms);
-
- gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
- if (gpu_idx < 0) {
- pr_debug("failed to get device by id 0x%x\n", gpu_id);
- return NULL;
- }
- pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
+ pdd = kfd_process_device_data_by_id(p, gpu_id);
if (!pdd) {
- pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
+ pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
return NULL;
}
- return pdd->dev->adev;
+ return pdd->dev;
}
struct kfd_process_device *
-svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
+svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
{
struct kfd_process *p;
- int32_t gpu_idx, gpuid;
- int r;
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
- if (r) {
- pr_debug("failed to get device id by adev %p\n", adev);
- return NULL;
- }
-
- return kfd_process_device_from_gpuidx(p, gpu_idx);
+ return kfd_get_process_device_data(node, p);
}
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
@@ -735,7 +735,9 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
- *update_mapping = true;
+ if (!p->xnack_enabled)
+ *update_mapping = true;
+
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
@@ -818,7 +820,7 @@ svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
}
}
- return true;
+ return !prange->is_error_flag;
}
/**
@@ -858,6 +860,37 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
}
}
+static void *
+svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
+ uint64_t offset)
+{
+ unsigned char *dst;
+
+ dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+ memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+ return (void *)dst;
+}
+
+static int
+svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
+{
+ int i;
+
+ for (i = 0; i < MAX_GPU_INSTANCE; i++) {
+ if (!src->dma_addr[i])
+ continue;
+ dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
+ sizeof(*src->dma_addr[i]), src->npages, 0);
+ if (!dst->dma_addr[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
@@ -872,22 +905,16 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
if (!pold)
return 0;
- new = kvmalloc_array(new_n, size, GFP_KERNEL);
+ d = (new_start - old_start) * size;
+ new = svm_range_copy_array(pold, size, new_n, d);
if (!new)
return -ENOMEM;
-
- d = (new_start - old_start) * size;
- memcpy(new, pold + d, new_n * size);
-
- old = kvmalloc_array(old_n, size, GFP_KERNEL);
+ d = (new_start == old_start) ? new_n * size : 0;
+ old = svm_range_copy_array(pold, size, old_n, d);
if (!old) {
kvfree(new);
return -ENOMEM;
}
-
- d = (new_start == old_start) ? new_n * size : 0;
- memcpy(old, pold + d, old_n * size);
-
kvfree(pold);
*(void **)ppold = old;
*(void **)ppnew = new;
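svm_range_split_array() now funnels both copies through svm_range_copy_array(), choosing the source offset from how the split lands: the new piece always starts (new_start - old_start) elements in, and the old piece keeps either the head or the tail. A standalone sketch of the same offsets (element values invented):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

/* Mirrors the refactored split above: the range starting at new_start moves
 * to a freshly copied array, old_n elements stay behind, and which half
 * survives in the "old" array depends on whether the split is at the front. */
static int split_array(uint32_t **pnew, uint32_t **pold, uint64_t old_start,
		       uint64_t old_n, uint64_t new_start, uint64_t new_n)
{
	uint32_t *src = *pold;
	uint32_t *new = malloc(new_n * sizeof(*new));
	uint32_t *old = malloc(old_n * sizeof(*old));

	if (!new || !old) {
		free(new);
		free(old);
		return -1;
	}
	memcpy(new, src + (new_start - old_start), new_n * sizeof(*new));
	memcpy(old, src + (new_start == old_start ? new_n : 0), old_n * sizeof(*old));
	free(src);
	*pnew = new;
	*pold = old;
	return 0;
}

int main(void)
{
	uint32_t *pages = malloc(4 * sizeof(*pages)), *new_pages = NULL;

	assert(pages);
	for (int i = 0; i < 4; i++)
		pages[i] = 10 + i;                /* {10, 11, 12, 13} */

	/* Split off the first two entries: new gets {10, 11}, old keeps {12, 13}. */
	assert(split_array(&new_pages, &pages, 0, 2, 0, 2) == 0);
	assert(new_pages[0] == 10 && pages[0] == 12);
	free(new_pages);
	free(pages);
	return 0;
}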
@@ -1146,31 +1173,39 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
}
return 0;
}
+static bool
+svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
+{
+ return (node_a->adev == node_b->adev ||
+ amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
+}
static uint64_t
-svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
- int domain)
+svm_range_get_pte_flags(struct kfd_node *node,
+ struct svm_range *prange, int domain)
{
- struct amdgpu_device *bo_adev;
+ struct kfd_node *bo_node;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
+ bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
+ unsigned int mtype_local;
if (domain == SVM_RANGE_VRAM_DOMAIN)
- bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
+ bo_node = prange->svm_bo->node;
- switch (KFD_GC_VERSION(adev->kfd.dev)) {
+ switch (node->adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_adev == adev) {
+ if (bo_node == node) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ if (svm_nodes_in_same_hive(node, bo_node))
snoop = true;
}
} else {
@@ -1180,15 +1215,15 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
break;
case IP_VERSION(9, 4, 2):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_adev == adev) {
+ if (bo_node == node) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (node->adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ if (svm_nodes_in_same_hive(node, bo_node))
snoop = true;
}
} else {
@@ -1196,6 +1231,37 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
+ case IP_VERSION(9, 4, 3):
+ mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
+ (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
+ snoop = true;
+ if (uncached) {
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else if (domain == SVM_RANGE_VRAM_DOMAIN) {
+ /* local HBM region close to partition */
+ if (bo_node->adev == node->adev &&
+ (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
+ mapping_flags |= mtype_local;
+ /* local HBM region far from partition or remote XGMI GPU */
+ else if (svm_nodes_in_same_hive(bo_node, node))
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ /* PCIe P2P */
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ /* system memory accessed by the APU */
+ } else if (node->adev->flags & AMD_IS_APU) {
+ /* On NUMA systems, locality is determined per-page
+ * in amdgpu_gmc_override_vm_pte_flags
+ */
+ if (num_possible_nodes() <= 1)
+ mapping_flags |= mtype_local;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ /* system memory accessed by the dGPU */
+ } else {
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ }
+ break;
default:
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
@@ -1212,7 +1278,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
- pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
+ pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
return pte_flags;
}
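The new IP_VERSION(9, 4, 3) case selects the MTYPE based on where the backing memory lives relative to the faulting partition. The condensed sketch below restates that decision tree with an illustrative enum instead of the AMDGPU_VM_MTYPE_* bits; the forced-off `uncached` path is omitted.

#include <stdio.h>
#include <stdbool.h>

enum mtype { MTYPE_LOCAL, MTYPE_NC, MTYPE_UC };

/* "local" stands for the amdgpu_mtype_local module-parameter choice (RW/NC/CC). */
static enum mtype pick_mtype(bool vram, bool same_adev, bool same_mem_partition,
			     bool same_hive, bool is_apu, bool single_numa_node)
{
	if (vram) {
		if (same_adev && same_mem_partition)
			return MTYPE_LOCAL;       /* HBM close to this partition */
		if (same_hive)
			return MTYPE_NC;          /* far HBM or XGMI-connected GPU */
		return MTYPE_UC;                  /* PCIe P2P */
	}
	if (is_apu)
		return single_numa_node ? MTYPE_LOCAL : MTYPE_NC;
	return MTYPE_UC;                          /* system memory seen by a dGPU */
}

int main(void)
{
	printf("%d\n", pick_mtype(true, true, true, true, false, true));
	return 0;
}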
@@ -1319,7 +1385,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1328,6 +1394,10 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);
+ /* For dGPU mode, we use same vm_manager to allocate VRAM for
+ * different memory partition based on fpfn/lpfn, we should use
+ * same vm_manager.vram_base_offset regardless memory partition.
+ */
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,
pte_flags,
@@ -1365,16 +1435,14 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
unsigned long *bitmap, bool wait, bool flush_tlb)
{
struct kfd_process_device *pdd;
- struct amdgpu_device *bo_adev;
+ struct amdgpu_device *bo_adev = NULL;
struct kfd_process *p;
struct dma_fence *fence = NULL;
uint32_t gpuidx;
int r = 0;
if (prange->svm_bo && prange->ttm_res)
- bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
- else
- bo_adev = NULL;
+ bo_adev = prange->svm_bo->node->adev;
p = container_of(prange->svms, struct kfd_process, svms);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
@@ -1423,37 +1491,34 @@ struct svm_validate_context {
struct svm_range *prange;
bool intr;
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
- struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
- struct list_head validate_list;
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
};
-static int svm_range_reserve_bos(struct svm_validate_context *ctx)
+static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
{
struct kfd_process_device *pdd;
struct amdgpu_vm *vm;
uint32_t gpuidx;
int r;
- INIT_LIST_HEAD(&ctx->validate_list);
- for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
- pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
- if (!pdd) {
- pr_debug("failed to find device idx %d\n", gpuidx);
- return -EINVAL;
- }
- vm = drm_priv_to_vm(pdd->drm_priv);
-
- ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
- ctx->tv[gpuidx].num_shared = 4;
- list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
- }
+ drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0);
+ drm_exec_until_all_locked(&ctx->exec) {
+ for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
+ pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
+ if (!pdd) {
+ pr_debug("failed to find device idx %d\n", gpuidx);
+ r = -EINVAL;
+ goto unreserve_out;
+ }
+ vm = drm_priv_to_vm(pdd->drm_priv);
- r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
- ctx->intr, NULL);
- if (r) {
- pr_debug("failed %d to reserve bo\n", r);
- return r;
+ r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(r)) {
+ pr_debug("failed %d to reserve bo\n", r);
+ goto unreserve_out;
+ }
+ }
}
for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
@@ -1476,13 +1541,13 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
return 0;
unreserve_out:
- ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
+ drm_exec_fini(&ctx->exec);
return r;
}
static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
- ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
+ drm_exec_fini(&ctx->exec);
}
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
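
The two hunks above swap the ttm_eu reservation list for drm_exec. As a rough, self-contained sketch of that pattern (assuming the two-argument drm_exec_init() used throughout this series; lock_all_vms() and the vms[] array are illustrative, not part of the patch):

/* Minimal drm_exec locking loop mirroring svm_range_reserve_bos() above.
 * On contention, drm_exec_retry_on_contention() drops every lock taken so
 * far and restarts the drm_exec_until_all_locked() block.
 */
static int lock_all_vms(struct drm_exec *exec, struct amdgpu_vm **vms,
			int count, bool intr)
{
	int i, r;

	drm_exec_init(exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0);
	drm_exec_until_all_locked(exec) {
		for (i = 0; i < count; i++) {
			/* lock the root PD and reserve room for two fences */
			r = amdgpu_vm_lock_pd(vms[i], exec, 2);
			drm_exec_retry_on_contention(exec);
			if (unlikely(r)) {
				drm_exec_fini(exec);	/* unlocks everything */
				return r;
			}
		}
	}
	return 0;	/* caller ends the transaction with drm_exec_fini() */
}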
@@ -1490,6 +1555,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
struct kfd_process_device *pdd;
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+ if (!pdd)
+ return NULL;
return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
@@ -1522,48 +1589,54 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
- struct svm_validate_context ctx;
+ struct svm_validate_context *ctx;
unsigned long start, end, addr;
struct kfd_process *p;
void *owner;
int32_t idx;
int r = 0;
- ctx.process = container_of(prange->svms, struct kfd_process, svms);
- ctx.prange = prange;
- ctx.intr = intr;
+ ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->process = container_of(prange->svms, struct kfd_process, svms);
+ ctx->prange = prange;
+ ctx->intr = intr;
if (gpuidx < MAX_GPU_INSTANCE) {
- bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
- bitmap_set(ctx.bitmap, gpuidx, 1);
- } else if (ctx.process->xnack_enabled) {
- bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
+ bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
+ bitmap_set(ctx->bitmap, gpuidx, 1);
+ } else if (ctx->process->xnack_enabled) {
+ bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
/* If prefetch range to GPU, or GPU retry fault migrate range to
* GPU, which has ACCESS attribute to the range, create mapping
* on that GPU.
*/
if (prange->actual_loc) {
- gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
+ gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
prange->actual_loc);
if (gpuidx < 0) {
WARN_ONCE(1, "failed get device by id 0x%x\n",
prange->actual_loc);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_ctx;
}
if (test_bit(gpuidx, prange->bitmap_access))
- bitmap_set(ctx.bitmap, gpuidx, 1);
+ bitmap_set(ctx->bitmap, gpuidx, 1);
}
} else {
- bitmap_or(ctx.bitmap, prange->bitmap_access,
+ bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
- if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
- if (!prange->mapped_to_gpu)
- return 0;
-
- bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+ if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+ bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+ if (!prange->mapped_to_gpu ||
+ bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
+ r = 0;
+ goto free_ctx;
+ }
}
if (prange->actual_loc && !prange->ttm_res) {
@@ -1571,15 +1644,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
* svm_migrate_ram_to_vram after allocating a BO.
*/
WARN_ONCE(1, "VRAM BO missing during validation\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto free_ctx;
}
- svm_range_reserve_bos(&ctx);
+ svm_range_reserve_bos(ctx, intr);
p = container_of(prange->svms, struct kfd_process, svms);
- owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
+ owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
MAX_GPU_INSTANCE));
- for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
+ for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
if (kfd_svm_page_owner(p, idx) != owner) {
owner = NULL;
break;
@@ -1616,7 +1690,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
offset = (addr - start) >> PAGE_SHIFT;
- r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
+ r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r) {
pr_debug("failed %d to dma map range\n", r);
@@ -1636,7 +1710,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
r = svm_range_map_to_gpus(prange, offset, npages, readonly,
- ctx.bitmap, wait, flush_tlb);
+ ctx->bitmap, wait, flush_tlb);
unlock_out:
svm_range_unlock(prange);
@@ -1650,11 +1724,15 @@ unlock_out:
}
unreserve_out:
- svm_range_unreserve_bos(&ctx);
+ svm_range_unreserve_bos(ctx);
+ prange->is_error_flag = !!r;
if (!r)
prange->validate_timestamp = ktime_get_boottime();
+free_ctx:
+ kfree(ctx);
+
return r;
}
@@ -1783,6 +1861,7 @@ out_reschedule:
* @mm: current process mm_struct
* @start: starting process queue number
* @last: last process queue number
+ * @event: mmu notifier event when range is evicted or migrated
*
* Stop all queues of the process to ensure GPU doesn't access the memory, then
* return to let CPU evict the buffer and proceed CPU pagetable update.
@@ -1881,7 +1960,10 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new = svm_range_new(old->svms, old->start, old->last, false);
if (!new)
return NULL;
-
+ if (svm_range_copy_dma_addrs(new, old)) {
+ svm_range_free(new, false);
+ return NULL;
+ }
if (old->svm_bo) {
new->ttm_res = old->ttm_res;
new->offset = old->offset;
@@ -1906,14 +1988,23 @@ void svm_range_set_max_pages(struct amdgpu_device *adev)
{
uint64_t max_pages;
uint64_t pages, _pages;
+ uint64_t min_pages = 0;
+ int i, id;
+
+ for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
+ if (adev->kfd.dev->nodes[i]->xcp)
+ id = adev->kfd.dev->nodes[i]->xcp->id;
+ else
+ id = -1;
+ pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
+ pages = clamp(pages, 1ULL << 9, 1ULL << 18);
+ pages = rounddown_pow_of_two(pages);
+ min_pages = min_not_zero(min_pages, pages);
+ }
- /* 1/32 VRAM size in pages */
- pages = adev->gmc.real_vram_size >> 17;
- pages = clamp(pages, 1ULL << 9, 1ULL << 18);
- pages = rounddown_pow_of_two(pages);
do {
max_pages = READ_ONCE(max_svm_range_pages);
- _pages = min_not_zero(max_pages, pages);
+ _pages = min_not_zero(max_pages, min_pages);
} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
}
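
The per-partition computation above replaces the old global 1/32-of-VRAM heuristic with the smallest value across all KFD nodes. A worked example, assuming a hypothetical board whose KFD_XCP_MEMORY_SIZE() reports 24 GiB per memory partition:

/* Hypothetical partition of 24 GiB:
 *
 *   pages = (24ULL << 30) >> 17;                  -> 196608  (1/32 of the 4 KiB pages)
 *   pages = clamp(pages, 1ULL << 9, 1ULL << 18);  -> 196608  (already in range)
 *   pages = rounddown_pow_of_two(pages);          -> 131072  (2^17)
 *
 * min_not_zero() across the partitions then caps max_svm_range_pages at
 * 131072 pages, i.e. 512 MiB per SVM range for this configuration.
 */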
@@ -2507,29 +2598,31 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
*/
static int32_t
svm_range_best_restore_location(struct svm_range *prange,
- struct amdgpu_device *adev,
+ struct kfd_node *node,
int32_t *gpuidx)
{
- struct amdgpu_device *bo_adev, *preferred_adev;
+ struct kfd_node *bo_node, *preferred_node;
struct kfd_process *p;
uint32_t gpuid;
int r;
p = container_of(prange->svms, struct kfd_process, svms);
- r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
+ r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
if (r < 0) {
pr_debug("failed to get gpuid from kgd\n");
return -1;
}
+ if (node->adev->gmc.is_app_apu)
+ return 0;
+
if (prange->preferred_loc == gpuid ||
prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
return prange->preferred_loc;
} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
- preferred_adev = svm_range_get_adev_by_id(prange,
- prange->preferred_loc);
- if (amdgpu_xgmi_same_hive(adev, preferred_adev))
+ preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
+ if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
return prange->preferred_loc;
/* fall through */
}
@@ -2541,8 +2634,8 @@ svm_range_best_restore_location(struct svm_range *prange,
if (!prange->actual_loc)
return 0;
- bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
- if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
+ if (bo_node && svm_nodes_in_same_hive(node, bo_node))
return prange->actual_loc;
else
return 0;
@@ -2659,7 +2752,7 @@ svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
}
static struct
-svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
+svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
struct kfd_process *p,
struct mm_struct *mm,
int64_t addr)
@@ -2694,7 +2787,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
- if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
+ if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
svm_range_free(prange, true);
return NULL;
@@ -2748,7 +2841,7 @@ static bool svm_range_skip_recover(struct svm_range *prange)
}
static void
-svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
+svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
int32_t gpuidx)
{
struct kfd_process_device *pdd;
@@ -2761,7 +2854,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
uint32_t gpuid;
int r;
- r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
+ r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
if (r < 0)
return;
}
@@ -2789,6 +2882,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
struct mm_struct *mm = NULL;
@@ -2796,6 +2890,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
struct svm_range *prange;
struct kfd_process *p;
ktime_t timestamp = ktime_get_boottime();
+ struct kfd_node *node;
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
@@ -2803,7 +2898,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
bool migration = false;
int r = 0;
- if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
+ if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
pr_debug("device does not support SVM\n");
return -EFAULT;
}
@@ -2839,6 +2934,13 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
+ node = kfd_node_by_irq_ids(adev, node_id, vmid);
+ if (!node) {
+ pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
+ vmid);
+ r = -EFAULT;
+ goto out;
+ }
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
@@ -2857,7 +2959,7 @@ retry_write_locked:
write_locked = true;
goto retry_write_locked;
}
- prange = svm_range_create_unregistered_range(adev, p, mm, addr);
+ prange = svm_range_create_unregistered_range(node, p, mm, addr);
if (!prange) {
pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
svms, addr);
@@ -2872,7 +2974,7 @@ retry_write_locked:
mutex_lock(&prange->migrate_mutex);
if (svm_range_skip_recover(prange)) {
- amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+ amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
r = 0;
goto out_unlock_range;
}
@@ -2903,7 +3005,7 @@ retry_write_locked:
goto out_unlock_range;
}
- best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
+ best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
if (best_loc == -1) {
pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
svms, prange->start, prange->last);
@@ -2915,7 +3017,7 @@ retry_write_locked:
svms, prange->start, prange->last, best_loc,
prange->actual_loc);
- kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr,
+ kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
if (prange->actual_loc != best_loc) {
@@ -2953,7 +3055,7 @@ retry_write_locked:
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, prange->start, prange->last);
- kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr,
+ kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
out_unlock_range:
@@ -2962,7 +3064,7 @@ out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
- svm_range_count_fault(adev, p, gpuidx);
+ svm_range_count_fault(node, p, gpuidx);
mmput(mm);
out:
@@ -2970,7 +3072,7 @@ out:
if (r == -EAGAIN) {
pr_debug("recover vm fault later\n");
- amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+ amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
r = 0;
}
return r;
@@ -2994,10 +3096,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
} else {
r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
if (r)
goto out_unlock;
reserved_size += size;
@@ -3007,10 +3109,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
size = (prange->last - prange->start + 1) << PAGE_SHIFT;
if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
} else {
r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
if (r)
goto out_unlock;
reserved_size += size;
@@ -3023,7 +3125,7 @@ out_unlock:
if (r)
amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
- KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
else
/* Change xnack mode must be inside svms lock, to avoid race with
* svm_range_deferred_list_work unreserve memory in parallel.
@@ -3081,7 +3183,7 @@ int svm_range_list_init(struct kfd_process *p)
spin_lock_init(&svms->deferred_list_lock);
for (i = 0; i < p->n_pdds; i++)
- if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
+ if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
bitmap_set(svms->bitmap_supported, i, 1);
return 0;
@@ -3212,7 +3314,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
uint32_t best_loc = prange->prefetch_loc;
struct kfd_process_device *pdd;
- struct amdgpu_device *bo_adev;
+ struct kfd_node *bo_node;
struct kfd_process *p;
uint32_t gpuidx;
@@ -3221,9 +3323,14 @@ svm_range_best_prefetch_location(struct svm_range *prange)
if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
goto out;
- bo_adev = svm_range_get_adev_by_id(prange, best_loc);
- if (!bo_adev) {
- WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
+ bo_node = svm_range_get_node_by_id(prange, best_loc);
+ if (!bo_node) {
+ WARN_ONCE(1, "failed to get valid kfd node at id 0x%x\n", best_loc);
+ best_loc = 0;
+ goto out;
+ }
+
+ if (bo_node->adev->gmc.is_app_apu) {
best_loc = 0;
goto out;
}
@@ -3241,10 +3348,10 @@ svm_range_best_prefetch_location(struct svm_range *prange)
continue;
}
- if (pdd->dev->adev == bo_adev)
+ if (pdd->dev->adev == bo_node->adev)
continue;
- if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
+ if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
best_loc = 0;
break;
}
@@ -3492,7 +3599,7 @@ out_unlock_range:
break;
}
- svm_range_debug_dump(svms);
+ dynamic_svm_range_dump(svms);
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 7a33b93f9df6..9e668eeefb32 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -48,6 +48,7 @@ struct svm_range_bo {
struct work_struct eviction_work;
uint32_t evicting;
struct work_struct release_work;
+ struct kfd_node *node;
};
enum svm_work_list_ops {
@@ -133,6 +134,7 @@ struct svm_range {
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool validated_once;
bool mapped_to_gpu;
+ bool is_error_flag;
};
static inline void svm_range_lock(struct svm_range *prange)
@@ -163,16 +165,17 @@ int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
struct svm_range *svm_range_from_addr(struct svm_range_list *svms,
unsigned long addr,
struct svm_range **parent);
-struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange,
- uint32_t id);
-int svm_range_vram_node_new(struct amdgpu_device *adev,
- struct svm_range *prange, bool clear);
+struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
+ uint32_t gpu_id);
+int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
+ bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange);
-int svm_range_restore_pages(struct amdgpu_device *adev,
- unsigned int pasid, uint64_t addr, bool write_fault);
+int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
+ uint32_t vmid, uint32_t node_id, uint64_t addr,
+ bool write_fault);
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
void svm_range_add_list_work(struct svm_range_list *svms,
struct svm_range *prange, struct mm_struct *mm,
@@ -180,7 +183,7 @@ void svm_range_add_list_work(struct svm_range_list *svms,
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
-void svm_range_free_dma_mappings(struct svm_range *prange);
+void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -192,13 +195,14 @@ int kfd_criu_restore_svm(struct kfd_process *p,
uint64_t max_priv_data_size);
int kfd_criu_resume_svm(struct kfd_process *p);
struct kfd_process_device *
-svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
+svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node);
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
/* SVM API and HMM page migration work together, device memory type
* is initialized to not 0 when page migration register device memory.
*/
-#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
+#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
+ (adev)->gmc.is_app_apu)
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
@@ -219,8 +223,9 @@ static inline void svm_range_list_fini(struct kfd_process *p)
}
static inline int svm_range_restore_pages(struct amdgpu_device *adev,
- unsigned int pasid, uint64_t addr,
- bool write_fault)
+ unsigned int pasid,
+ uint32_t client_id, uint32_t node_id,
+ uint64_t addr, bool write_fault)
{
return -EFAULT;
}
@@ -261,6 +266,10 @@ static inline int kfd_criu_resume_svm(struct kfd_process *p)
return 0;
}
+static inline void svm_range_set_max_pages(struct amdgpu_device *adev)
+{
+}
+
#define KFD_IS_SVM_API_SUPPORTED(dev) false
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 8e4124dcb6e4..ff98fded9534 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -36,8 +36,8 @@
#include "kfd_crat.h"
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
-#include "kfd_iommu.h"
#include "kfd_svm.h"
+#include "kfd_debug.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
#include "amdgpu.h"
@@ -96,7 +96,7 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
return ret;
}
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_node *kfd_device_by_id(uint32_t gpu_id)
{
struct kfd_topology_device *top_dev;
@@ -107,10 +107,10 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
return top_dev->gpu;
}
-struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_node *device = NULL;
down_read(&topology_lock);
@@ -125,24 +125,6 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
return device;
}
-struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev)
-{
- struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
-
- down_read(&topology_lock);
-
- list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu && top_dev->gpu->adev == adev) {
- device = top_dev->gpu;
- break;
- }
-
- up_read(&topology_lock);
-
- return device;
-}
-
/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
@@ -468,7 +450,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
- dev->gpu ? dev->node_props.simd_count : 0);
+ dev->gpu ? (dev->node_props.simd_count *
+ NUM_XCC(dev->gpu->xcc_mask)) : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
@@ -492,7 +475,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "wave_front_size",
dev->node_props.wave_front_size);
sysfs_show_32bit_prop(buffer, offs, "array_count",
- dev->node_props.array_count);
+ dev->gpu ? (dev->node_props.array_count *
+ NUM_XCC(dev->gpu->xcc_mask)) : 0);
sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine",
dev->node_props.simd_arrays_per_engine);
sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array",
@@ -526,7 +510,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (dev->gpu) {
log_max_watch_addr =
- __ilog2_u32(dev->gpu->device_info.num_of_watch_points);
+ __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points);
if (log_max_watch_addr) {
dev->node_props.capability |=
@@ -548,14 +532,17 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL);
sysfs_show_32bit_prop(buffer, offs, "fw_version",
- dev->gpu->mec_fw_version);
+ dev->gpu->kfd->mec_fw_version);
sysfs_show_32bit_prop(buffer, offs, "capability",
dev->node_props.capability);
+ sysfs_show_64bit_prop(buffer, offs, "debug_prop",
+ dev->node_props.debug_prop);
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
- dev->gpu->sdma_fw_version);
+ dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
dev->gpu->adev->unique_id);
-
+ sysfs_show_32bit_prop(buffer, offs, "num_xcc",
+ NUM_XCC(dev->gpu->xcc_mask));
}
return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
@@ -1001,17 +988,6 @@ static void find_system_memory(const struct dmi_header *dm,
}
}
-/*
- * Performance counters information is not part of CRAT but we would like to
- * put them in the sysfs under topology directory for Thunk to get the data.
- * This function is called before updating the sysfs.
- */
-static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
-{
- /* These are the only counters supported so far */
- return kfd_iommu_add_perf_counters(kdev);
-}
-
/* kfd_add_non_crat_information - Add information that is not currently
* defined in CRAT but is necessary for KFD topology
* @dev - topology device to which addition info is added
@@ -1026,25 +1002,6 @@ static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
/* TODO: For GPU node, rearrange code from kfd_topology_add_device */
}
-/* kfd_is_acpi_crat_invalid - CRAT from ACPI is valid only for AMD APU devices.
- * Ignore CRAT for all other devices. AMD APU is identified if both CPU
- * and GPU cores are present.
- * @device_list - topology device list created by parsing ACPI CRAT table.
- * @return - TRUE if invalid, FALSE is valid.
- */
-static bool kfd_is_acpi_crat_invalid(struct list_head *device_list)
-{
- struct kfd_topology_device *dev;
-
- list_for_each_entry(dev, device_list, list) {
- if (dev->node_props.cpu_cores_count &&
- dev->node_props.simd_count)
- return false;
- }
- pr_info("Ignoring ACPI CRAT on non-APU system\n");
- return true;
-}
-
int kfd_topology_init(void)
{
void *crat_image = NULL;
@@ -1075,48 +1032,25 @@ int kfd_topology_init(void)
*/
proximity_domain = 0;
- /*
- * Get the CRAT image from the ACPI. If ACPI doesn't have one
- * or if ACPI CRAT is invalid create a virtual CRAT.
- * NOTE: The current implementation expects all AMD APUs to have
- * CRAT. If no CRAT is available, it is assumed to be a CPU
- */
- ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
- if (!ret) {
- ret = kfd_parse_crat_table(crat_image,
- &temp_topology_device_list,
- proximity_domain);
- if (ret ||
- kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
- kfd_release_topology_device_list(
- &temp_topology_device_list);
- kfd_destroy_crat_image(crat_image);
- crat_image = NULL;
- }
+ ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
+ COMPUTE_UNIT_CPU, NULL,
+ proximity_domain);
+ cpu_only_node = 1;
+ if (ret) {
+ pr_err("Error creating VCRAT table for CPU\n");
+ return ret;
}
- if (!crat_image) {
- ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
- COMPUTE_UNIT_CPU, NULL,
- proximity_domain);
- cpu_only_node = 1;
- if (ret) {
- pr_err("Error creating VCRAT table for CPU\n");
- return ret;
- }
-
- ret = kfd_parse_crat_table(crat_image,
- &temp_topology_device_list,
- proximity_domain);
- if (ret) {
- pr_err("Error parsing VCRAT table for CPU\n");
- goto err;
- }
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret) {
+ pr_err("Error parsing VCRAT table for CPU\n");
+ goto err;
}
kdev = list_first_entry(&temp_topology_device_list,
struct kfd_topology_device, list);
- kfd_add_perf_to_topology(kdev);
down_write(&topology_lock);
kfd_topology_update_device_list(&temp_topology_device_list,
@@ -1157,10 +1091,10 @@ void kfd_topology_shutdown(void)
up_write(&topology_lock);
}
-static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
+static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu)
{
uint32_t hashout;
- uint32_t buf[7];
+ uint32_t buf[8];
uint64_t local_mem_size;
int i;
@@ -1177,8 +1111,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
buf[4] = gpu->adev->pdev->bus->number;
buf[5] = lower_32_bits(local_mem_size);
buf[6] = upper_32_bits(local_mem_size);
+ buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16);
- for (i = 0, hashout = 0; i < 7; i++)
+ for (i = 0, hashout = 0; i < 8; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
return hashout;
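
The new buf[7] word folds the XCC configuration into the gpu_id hash so that partitions of the same ASIC get distinct IDs. A small worked example (the mask values are hypothetical, and NUM_XCC() is assumed to count the set bits):

/* xcc_mask = 0x0f:  ffs(0x0f) - 1 = 0,  NUM_XCC(0x0f) = 4
 *                    buf[7] = 0 | (4 << 16) = 0x00040000
 * xcc_mask = 0x30:  ffs(0x30) - 1 = 4,  NUM_XCC(0x30) = 2
 *                    buf[7] = 4 | (2 << 16) = 0x00020004
 *
 * Two partitions on the same board therefore differ in buf[7] and hash to
 * different gpu_ids even when buf[0..6] are identical.
 */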
@@ -1188,7 +1123,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* list then return NULL. This means a new topology device has to
* be created for this GPU.
*/
-static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
+static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
@@ -1201,8 +1136,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
/* Discrete GPUs need their own topology device list
* entries. Don't assign them to CPU/APU nodes.
*/
- if (!gpu->use_iommu_v2 &&
- dev->node_props.cpu_cores_count)
+ if (dev->node_props.cpu_cores_count)
continue;
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
@@ -1248,7 +1182,8 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
* for APUs - If CRAT from ACPI reports more than one bank, then
* all the banks will report the same mem_clk_max information
*/
- amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info,
+ dev->gpu->xcp);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1275,7 +1210,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev,
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
/* set gpu (dev) flags. */
} else {
- if (!dev->gpu->pci_atomic_requested ||
+ if (!dev->gpu->kfd->pci_atomic_requested ||
dev->gpu->adev->asic_type == CHIP_HAWAII)
link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
@@ -1323,10 +1258,16 @@ static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
continue;
/* Include the CPU peer in GPU hive if connected over xGMI. */
- if (!peer_dev->gpu && !peer_dev->node_props.hive_id &&
- dev->node_props.hive_id &&
- dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+ if (!peer_dev->gpu &&
+ link->iolink_type == CRAT_IOLINK_TYPE_XGMI) {
+ /*
+ * If the GPU is not part of a GPU hive, use its pci
+ * device location as the hive ID to bind with the CPU.
+ */
+ if (!dev->node_props.hive_id)
+ dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev);
peer_dev->node_props.hive_id = dev->node_props.hive_id;
+ }
list_for_each_entry(inbound_link, &peer_dev->io_link_props,
list) {
@@ -1569,8 +1510,8 @@ static int kfd_dev_create_p2p_links(void)
if (dev == new_dev)
break;
if (!dev->gpu || !dev->gpu->adev ||
- (dev->gpu->hive_id &&
- dev->gpu->hive_id == new_dev->gpu->hive_id))
+ (dev->gpu->kfd->hive_id &&
+ dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
goto next;
/* check if node(s) is/are peer accessible in one direction or bi-direction */
@@ -1590,7 +1531,6 @@ out:
return ret;
}
-
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
@@ -1723,7 +1663,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
/* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
* tables
*/
-static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
+static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k;
@@ -1805,7 +1745,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
pr_debug("Added [%d] GPU cache entries\n", num_of_entries);
}
-static int kfd_topology_add_device_locked(struct kfd_dev *gpu, uint32_t gpu_id,
+static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id,
struct kfd_topology_device **dev)
{
int proximity_domain = ++topology_crat_proximity_domain;
@@ -1865,7 +1805,107 @@ err:
return res;
}
-int kfd_topology_add_device(struct kfd_dev *gpu)
+static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev)
+{
+ bool firmware_supported = true;
+
+ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) &&
+ KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) {
+ uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version &
+ AMDGPU_MES_API_VERSION_MASK) >>
+ AMDGPU_MES_API_VERSION_SHIFT;
+ uint32_t mes_rev = dev->gpu->adev->mes.sched_version &
+ AMDGPU_MES_VERSION_MASK;
+
+ firmware_supported = (mes_api_rev >= 14) && (mes_rev >= 64);
+ goto out;
+ }
+
+ /*
+ * Note: Any unlisted devices here are assumed to support exception handling.
+ * Add additional checks here as needed.
+ */
+ switch (KFD_GC_VERSION(dev->gpu)) {
+ case IP_VERSION(9, 0, 1):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 459;
+ break;
+ case IP_VERSION(9, 4, 1):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 60;
+ break;
+ case IP_VERSION(9, 4, 2):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 51;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 144;
+ break;
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ firmware_supported = dev->gpu->kfd->mec_fw_version >= 89;
+ break;
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 3):
+ firmware_supported = false;
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (firmware_supported)
+ dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED;
+}
+
+static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
+{
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+
+ dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT |
+ HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED |
+ HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED;
+
+ if (kfd_dbg_has_ttmps_always_setup(dev->gpu))
+ dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID;
+
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) {
+ if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3))
+ dev->node_props.debug_prop |=
+ HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 |
+ HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3;
+ else
+ dev->node_props.debug_prop |=
+ HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 |
+ HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+
+ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2))
+ dev->node_props.capability |=
+ HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+ } else {
+ dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
+ HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
+
+ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
+ dev->node_props.capability |=
+ HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+ }
+
+ kfd_topology_set_dbg_firmware_support(dev);
+}
+
+int kfd_topology_add_device(struct kfd_node *gpu)
{
uint32_t gpu_id;
struct kfd_topology_device *dev;
@@ -1875,7 +1915,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
gpu_id = kfd_generate_gpu_id(gpu);
- pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ if (gpu->xcp && !gpu->xcp->ddev) {
+ dev_warn(gpu->adev->dev,
+ "Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
+ gpu_id);
+ return 0;
+ } else {
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ }
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
@@ -1916,28 +1963,37 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
- dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version;
+ dev->node_props.gfx_target_version =
+ gpu->kfd->device_info.gfx_target_version;
dev->node_props.vendor_id = gpu->adev->pdev->vendor;
dev->node_props.device_id = gpu->adev->pdev->device;
dev->node_props.capability |=
((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) &
HSA_CAP_ASIC_REVISION_MASK);
+
dev->node_props.location_id = pci_dev_id(gpu->adev->pdev);
+ if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3))
+ dev->node_props.location_id |= dev->gpu->node_id;
+
dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
- dev->node_props.drm_render_minor =
- gpu->shared_resources.drm_render_minor;
- dev->node_props.hive_id = gpu->hive_id;
+ if (gpu->xcp)
+ dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index;
+ else
+ dev->node_props.drm_render_minor =
+ gpu->kfd->shared_resources.drm_render_minor;
+
+ dev->node_props.hive_id = gpu->kfd->hive_id;
dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu);
dev->node_props.num_sdma_xgmi_engines =
kfd_get_num_xgmi_sdma_engines(gpu);
dev->node_props.num_sdma_queues_per_engine =
- gpu->device_info.num_sdma_queues_per_engine -
- gpu->device_info.num_reserved_sdma_queues_per_engine;
+ gpu->kfd->device_info.num_sdma_queues_per_engine -
+ gpu->kfd->device_info.num_reserved_sdma_queues_per_engine;
dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
dev->gpu->adev->gds.gws_size : 0;
@@ -1966,23 +2022,18 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
default:
- if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1))
- dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
- HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
- else
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1))
WARN(1, "Unexpected ASIC family %u",
dev->gpu->adev->asic_type);
+ else
+ kfd_topology_set_capabilities(dev);
}
/*
* Overwrite ATS capability according to needs_iommu_device to fix
* potential missing corresponding bit in CRAT of BIOS.
*/
- if (dev->gpu->use_iommu_v2)
- dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
- else
- dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
+ dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
/* Fix errors in CZ CRAT.
* simd_count: Carrizo CRAT reports wrong simd_count, probably
@@ -2007,9 +2058,13 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
- if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev))
+ if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED;
+ if (dev->gpu->adev->gmc.is_app_apu ||
+ dev->gpu->adev->gmc.xgmi.connected_to_cpu)
+ dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS;
+
kfd_debug_print_topology();
kfd_notify_gpu_change(gpu_id, 1);
@@ -2079,7 +2134,7 @@ static void kfd_topology_update_io_links(int proximity_domain)
}
}
-int kfd_topology_remove_device(struct kfd_dev *gpu)
+int kfd_topology_remove_device(struct kfd_node *gpu)
{
struct kfd_topology_device *dev, *tmp;
uint32_t gpu_id;
@@ -2119,7 +2174,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
* Return - 0: On success (@kdev will be NULL for non GPU nodes)
* -1: If end of list
*/
-int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev)
{
struct kfd_topology_device *top_dev;
@@ -2173,29 +2228,6 @@ int kfd_numa_node_to_apic_id(int numa_node_id)
return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
}
-void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
-{
- struct kfd_topology_device *dev;
-
- gpu->use_iommu_v2 = false;
-
- if (!gpu->device_info.needs_iommu_device)
- return;
-
- down_read(&topology_lock);
-
- /* Only use IOMMUv2 if there is an APU topology node with no GPU
- * assigned yet. This GPU will be assigned to it.
- */
- list_for_each_entry(dev, &topology_device_list, list)
- if (dev->node_props.cpu_cores_count &&
- dev->node_props.simd_count &&
- !dev->gpu)
- gpu->use_iommu_v2 = true;
-
- up_read(&topology_lock);
-}
-
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index fca30d00a9bb..dea32a9e5506 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -31,6 +31,14 @@
#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 6
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9_4_3 7
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 7
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT \
+ (29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT)
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_GFX9_4_3 \
+ (30 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT)
+
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
@@ -42,6 +50,7 @@ struct kfd_node_properties {
uint32_t cpu_core_id_base;
uint32_t simd_id_base;
uint32_t capability;
+ uint64_t debug_prop;
uint32_t max_waves_per_simd;
uint32_t lds_size_in_kb;
uint32_t gds_size_in_kb;
@@ -75,7 +84,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -93,7 +102,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CACHE_SIBLINGMAP_SIZE];
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj;
struct attribute attr;
uint32_t sibling_map_size;
@@ -113,7 +122,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -135,7 +144,7 @@ struct kfd_topology_device {
struct list_head io_link_props;
struct list_head p2p_link_props;
struct list_head perf_props;
- struct kfd_dev *gpu;
+ struct kfd_node *gpu;
struct kobject *kobj_node;
struct kobject *kobj_mem;
struct kobject *kobj_cache;
diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
index e3f3b0b93a59..10138676f27f 100644
--- a/drivers/gpu/drm/amd/amdkfd/soc15_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h
@@ -40,6 +40,7 @@
#define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf)
#define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 31 & 0x1)
#define SOC15_PASID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) & 0xffff)
+#define SOC15_NODEID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) >> 16 & 0xff)
#define SOC15_CONTEXT_ID0_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[4]))
#define SOC15_CONTEXT_ID1_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[5]))
#define SOC15_CONTEXT_ID2_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[6]))
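
The new SOC15_NODEID_FROM_IH_ENTRY() macro complements the existing VMID/PASID decoders and supplies the extra vmid/node_id arguments that svm_range_restore_pages() now takes. A rough sketch of a caller (handle_svm_fault(), the ih_entry buffer and fault_addr are illustrative; the real decode lives in the GMC interrupt handlers):

/* Illustrative decode of a SOC15 IH ring entry before fault recovery. */
static int handle_svm_fault(struct amdgpu_device *adev,
			    const uint32_t *ih_entry,
			    uint64_t fault_addr, bool write_fault)
{
	unsigned int pasid = SOC15_PASID_FROM_IH_ENTRY(ih_entry);
	uint32_t vmid = SOC15_VMID_FROM_IH_ENTRY(ih_entry);
	uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_entry);

	/* vmid + node_id let the SVM code look up the right kfd_node via
	 * kfd_node_by_irq_ids() in the hunks above.
	 */
	return svm_range_restore_pages(adev, pasid, vmid, node_id,
				       fault_addr, write_fault);
}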
diff --git a/drivers/gpu/drm/amd/amdxcp/Makefile b/drivers/gpu/drm/amd/amdxcp/Makefile
new file mode 100644
index 000000000000..870501a4bb8c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdxcp/Makefile
@@ -0,0 +1,25 @@
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+amdxcp-y := amdgpu_xcp_drv.o
+
+obj-$(CONFIG_DRM_AMDGPU) += amdxcp.o
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
new file mode 100644
index 000000000000..353597fc908d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+
+#include "amdgpu_xcp_drv.h"
+
+#define MAX_XCP_PLATFORM_DEVICE 64
+
+struct xcp_device {
+ struct drm_device drm;
+ struct platform_device *pdev;
+};
+
+static const struct drm_driver amdgpu_xcp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
+ .name = "amdgpu_xcp_drv",
+ .major = 1,
+ .minor = 0,
+};
+
+static int pdev_num;
+static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+
+int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
+{
+ struct platform_device *pdev;
+ struct xcp_device *pxcp_dev;
+ int ret;
+
+ if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("amdgpu_xcp", pdev_num, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ pxcp_dev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_xcp_driver, struct xcp_device, drm);
+ if (IS_ERR(pxcp_dev)) {
+ ret = PTR_ERR(pxcp_dev);
+ goto out_devres;
+ }
+
+ xcp_dev[pdev_num] = pxcp_dev;
+ xcp_dev[pdev_num]->pdev = pdev;
+ *ddev = &pxcp_dev->drm;
+ pdev_num++;
+
+ return 0;
+
+out_devres:
+ devres_release_group(&pdev->dev, NULL);
+out_unregister:
+ platform_device_unregister(pdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
+
+void amdgpu_xcp_drv_release(void)
+{
+ for (--pdev_num; pdev_num >= 0; --pdev_num) {
+ devres_release_group(&xcp_dev[pdev_num]->pdev->dev, NULL);
+ platform_device_unregister(xcp_dev[pdev_num]->pdev);
+ xcp_dev[pdev_num]->pdev = NULL;
+ xcp_dev[pdev_num] = NULL;
+ }
+ pdev_num = 0;
+}
+EXPORT_SYMBOL(amdgpu_xcp_drv_release);
+
+static void __exit amdgpu_xcp_drv_exit(void)
+{
+ amdgpu_xcp_drv_release();
+}
+
+module_exit(amdgpu_xcp_drv_exit);
+
+MODULE_AUTHOR("AMD linux driver team");
+MODULE_DESCRIPTION("AMD XCP PLATFORM DEVICES");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
new file mode 100644
index 000000000000..c1c4b679bf95
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_XCP_DRV_H_
+#define _AMDGPU_XCP_DRV_H_
+
+int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drv_release(void);
+#endif /* _AMDGPU_XCP_DRV_H_ */
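
For context, the two exports above are meant to be called from the core amdgpu driver when it creates per-partition render nodes. A hedged usage sketch (example_alloc_xcp_drm_devices(), num_xcp and ddevs[] are illustrative, not the actual amdgpu_xcp code):

/* Illustrative caller: allocate one DRM device per compute partition. */
static int example_alloc_xcp_drm_devices(struct drm_device **ddevs, int num_xcp)
{
	int i, ret;

	for (i = 0; i < num_xcp; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&ddevs[i]);
		if (ret) {
			/* tears down every platform device allocated so far */
			amdgpu_xcp_drv_release();
			return ret;
		}
	}
	return 0;
}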
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 2d8e55e29637..901d1961b739 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -5,10 +5,10 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
- depends on BROKEN || !CC_IS_CLANG || X86_64 || SPARC64 || ARM64
+ depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if (X86 || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
+ select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@@ -42,16 +42,13 @@ config DEBUG_KERNEL_DC
Choose this option if you want to hit kdgb_break in assert.
config DRM_AMD_SECURE_DISPLAY
- bool "Enable secure display support"
- depends on DEBUG_FS
- depends on DRM_AMD_DC_FP
- help
- Choose this option if you want to
- support secure display
-
- This option enables the calculation
- of crc of specific region via debugfs.
- Cooperate with specific DMCU FW.
+ bool "Enable secure display support"
+ depends on DEBUG_FS
+ depends on DRM_AMD_DC_FP
+ help
+ Choose this option if you want to support secure display.
+ This option enables the calculation of the CRC of a specific region
+ via debugfs and cooperates with specific DMCU firmware.
endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 249b073f6a23..8bf94920d23e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,7 +38,7 @@ AMDGPUDM += dc_fpu.o
endif
ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o
endif
AMDGPUDM += amdgpu_dm_hdcp.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8b4b186c57f5..268cb99a4c4b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -245,51 +245,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
*/
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
+ struct amdgpu_crtc *acrtc = NULL;
+
if (crtc >= adev->mode_info.num_crtc)
return 0;
- else {
- struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- if (acrtc->dm_irq_params.stream == NULL) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
- crtc);
- return 0;
- }
+ acrtc = adev->mode_info.crtcs[crtc];
- return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
}
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
- else {
- struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- if (acrtc->dm_irq_params.stream == NULL) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
- crtc);
- return 0;
- }
+ acrtc = adev->mode_info.crtcs[crtc];
- /*
- * TODO rework base driver to use values directly.
- * for now parse it back into reg-format
- */
- dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
- &v_blank_start,
- &v_blank_end,
- &h_position,
- &v_position);
-
- *position = v_position | (h_position << 16);
- *vbl = v_blank_start | (v_blank_end << 16);
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
}
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+
return 0;
}
@@ -365,6 +366,14 @@ static inline void reverse_planes_order(struct dc_surface_update *array_of_surfa
* adjustments and preparation before calling it. This function is a wrapper
* for the dc_update_planes_and_stream that does any required configuration
* before passing control to DC.
+ *
+ * @dc: Display Core control structure
+ * @update_type: whether this is a FULL, MEDIUM or FAST update
+ * @planes_count: number of planes to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
*/
static inline bool update_planes_and_stream_adapter(struct dc *dc,
int update_type,
@@ -416,12 +425,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED,
- amdgpu_crtc->crtc_id,
- amdgpu_crtc);
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return;
}
@@ -875,7 +884,7 @@ static int dm_set_powergating_state(void *handle,
}
/* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1274,7 +1283,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
- pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@@ -1339,6 +1348,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
@@ -1357,8 +1375,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
DP_TEST_RESPONSE,
&test_response.raw,
sizeof(test_response));
- }
- else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
/* offload_work->data is from handle_hpd_rx_irq->
@@ -1546,7 +1563,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
- if(amdgpu_dm_irq_init(adev)) {
+ if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@@ -1622,9 +1639,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
- if (init_data.flags.gpu_vm_support &&
- (amdgpu_sg_display == 0))
- init_data.flags.gpu_vm_support = false;
+ if (init_data.flags.gpu_vm_support)
+ init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -1646,11 +1662,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
- /* Disable SubVP + DRR config by default */
- init_data.flags.disable_subvp_drr = true;
- if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
- init_data.flags.disable_subvp_drr = false;
-
init_data.flags.seamless_boot_edp_requested = false;
if (check_seamless_boot_capability(adev)) {
@@ -1672,9 +1683,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
goto error;
}
@@ -1691,9 +1704,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
adev->dm.dc->debug.disable_dsc = true;
- }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -1776,12 +1788,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
- if (!adev->dm.secure_display_ctxs) {
- DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
- }
-#endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
@@ -1816,9 +1822,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
*/
- if (dc_is_dmub_outbox_supported(adev->dm.dc))
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
dc_enable_dmub_outbox(adev->dm.dc);
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
+ dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
+ }
+
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -1840,6 +1851,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs)
+ DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
@@ -1938,8 +1954,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
mutex_destroy(&adev->dm.dpia_aux_lock);
-
- return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1948,7 +1962,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
int r;
const struct dmcu_firmware_header_v1_0 *hdr;
- switch(adev->asic_type) {
+ switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -2479,20 +2493,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
if (acrtc && state->stream_status[i].plane_count != 0) {
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
- DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
- acrtc->crtc_id, enable ? "en" : "dis", rc);
if (rc)
DRM_WARN("Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
- rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
- if (rc)
- DRM_WARN("Failed to enable vblank interrupts\n");
- } else {
- amdgpu_dm_crtc_disable_vblank(&acrtc->base);
- }
+ if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+ if (rc)
+ DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
@@ -2700,7 +2719,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } * bundle;
+ } *bundle;
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2730,8 +2749,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
cleanup:
kfree(bundle);
-
- return;
}
static int dm_resume(void *handle)
@@ -2852,7 +2869,7 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector && aconnector->mst_root)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -2945,8 +2962,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.set_powergating_state = dm_set_powergating_state,
};
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
@@ -2991,9 +3007,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- if (caps->ext_caps->bits.oled == 1 /*||
- caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
- caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
caps->aux_support = true;
if (amdgpu_backlight == 0)
@@ -3227,84 +3246,6 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
- u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- u8 dret;
- bool new_irq_handled = false;
- int dpcd_addr;
- int dpcd_bytes_to_read;
-
- const int max_process_count = 30;
- int process_count = 0;
-
- const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
- if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
- dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
- /* DPCD 0x200 - 0x201 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT;
- } else {
- dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
- /* DPCD 0x2002 - 0x2005 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT_ESI;
- }
-
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- while (dret == dpcd_bytes_to_read &&
- process_count < max_process_count) {
- u8 retry;
- dret = 0;
-
- process_count++;
-
- DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
- /* handle HPD short pulse irq */
- if (aconnector->mst_mgr.mst_state)
- drm_dp_mst_hpd_irq(
- &aconnector->mst_mgr,
- esi,
- &new_irq_handled);
-
- if (new_irq_handled) {
- /* ACK at DPCD to notify down stream */
- const int ack_dpcd_bytes_to_write =
- dpcd_bytes_to_read - 1;
-
- for (retry = 0; retry < 3; retry++) {
- u8 wret;
-
- wret = drm_dp_dpcd_write(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr + 1,
- &esi[1],
- ack_dpcd_bytes_to_write);
- if (wret == ack_dpcd_bytes_to_write)
- break;
- }
-
- /* check if there is new irq to be handled */
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- new_irq_handled = false;
- } else {
- break;
- }
- }
-
- if (process_count == max_process_count)
- DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@@ -3366,7 +3307,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- dm_handle_mst_sideband_msg(aconnector);
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by the polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
goto out;
}
@@ -3457,7 +3414,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;
@@ -3466,7 +3423,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
/* Also register for DP short pulse (hpd_rx). */
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3475,11 +3432,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
- aconnector;
}
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
@@ -3492,7 +3449,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3506,11 +3463,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
- * for acknowledging and handling. */
+ * for acknowledging and handling.
+ */
/* Use VBLANK interrupt */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
@@ -3518,7 +3476,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
- dc_interrupt_to_irq_source(dc, i+1 , 0);
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
@@ -3574,7 +3532,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
@@ -3591,7 +3549,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
- * for acknowledging and handling. */
+ * for acknowledging and handling.
+ */
/* Use VBLANK interrupt */
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4038,7 +3997,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
}
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
- unsigned *min, unsigned *max)
+ unsigned int *min, unsigned int *max)
{
if (!caps)
return 0;
@@ -4058,7 +4017,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
- unsigned min, max;
+ unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@@ -4071,7 +4030,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
- unsigned min, max;
+ unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@@ -4137,6 +4096,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
int bl_idx)
{
+ int ret;
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4151,13 +4111,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (!rc)
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
- } else {
- int ret = dc_link_get_backlight_level(link);
-
- if (ret == DC_ERROR_UNEXPECTED)
- return dm->brightness[bl_idx];
- return convert_brightness_to_user(&caps, ret);
}
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -4551,7 +4512,6 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_atomic_private_obj_fini(&dm->atomic_obj);
- return;
}
/******************************************************************************
@@ -5052,11 +5012,7 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
s32 y, s32 width, s32 height,
int *i, bool ffu)
{
- if (*i > DC_MAX_DIRTY_RECTS)
- return;
-
- if (*i == DC_MAX_DIRTY_RECTS)
- goto out;
+ WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
dirty_rect->x = x;
dirty_rect->y = y;
@@ -5072,7 +5028,6 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
plane->base.id, x, y, width, height);
-out:
(*i)++;
}
@@ -5159,6 +5114,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
*dirty_regions_changed = bb_changed;
+ if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
if (bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
@@ -5188,9 +5146,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
new_plane_state->crtc_h, &i, false);
}
- if (i > DC_MAX_DIRTY_RECTS)
- goto ffu;
-
flip_addrs->dirty_rect_count = i;
return;
@@ -5326,21 +5281,44 @@ get_aspect_ratio(const struct drm_display_mode *mode_in)
}
static enum dc_color_space
-get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+ const struct drm_connector_state *connector_state)
{
enum dc_color_space color_space = COLOR_SPACE_SRGB;
- switch (dc_crtc_timing->pixel_encoding) {
- case PIXEL_ENCODING_YCBCR422:
- case PIXEL_ENCODING_YCBCR444:
- case PIXEL_ENCODING_YCBCR420:
- {
+ switch (connector_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT601_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ break;
+ case DRM_MODE_COLORIMETRY_OPRGB:
+ color_space = COLOR_SPACE_ADOBERGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+ else
+ color_space = COLOR_SPACE_2020_YCBCR;
+ break;
+ case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+ default:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ color_space = COLOR_SPACE_SRGB;
/*
* 27030khz is the separation point between HDTV and SDTV
* according to HDMI spec, we use YCbCr709 and YCbCr601
* respectively
*/
- if (dc_crtc_timing->pix_clk_100hz > 270300) {
+ } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
if (dc_crtc_timing->flags.Y_ONLY)
color_space =
COLOR_SPACE_YCBCR709_LIMITED;
@@ -5353,15 +5331,6 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
else
color_space = COLOR_SPACE_YCBCR601;
}
-
- }
- break;
- case PIXEL_ENCODING_RGB:
- color_space = COLOR_SPACE_SRGB;
- break;
-
- default:
- WARN_ON(1);
break;
}
@@ -5374,6 +5343,7 @@ static bool adjust_colour_depth_from_display_info(
{
enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
+
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5500,7 +5470,7 @@ static void fill_stream_properties_from_drm_display_mode(
}
}
- stream->output_color_space = get_output_color_space(timing_out);
+ stream->output_color_space = get_output_color_space(timing_out, connector_state);
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -5589,6 +5559,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
+
sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@@ -5712,7 +5683,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
return &aconnector->freesync_vid_base;
/* Find the preferred mode */
- list_for_each_entry (m, list_head, head) {
+ list_for_each_entry(m, list_head, head) {
if (m->type & DRM_MODE_TYPE_PREFERRED) {
m_pref = m;
break;
@@ -5736,7 +5707,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
* For some monitors, preferred mode is not the mode with highest
* supported refresh rate.
*/
- list_for_each_entry (m, list_head, head) {
+ list_for_each_entry(m, list_head, head) {
current_refresh = drm_mode_vrefresh(m);
if (m->hdisplay == m_pref->hdisplay &&
@@ -5829,6 +5800,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
edp_min_bpp_x16, edp_max_bpp_x16,
dsc_caps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&bw_range)) {
if (bw_range.max_kbps < link_bw_in_kbps) {
@@ -5837,6 +5809,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
&dsc_options,
0,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&dsc_cfg)) {
stream->timing.dsc_cfg = dsc_cfg;
stream->timing.flags.DSC = 1;
@@ -5851,6 +5824,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
&dsc_options,
link_bw_in_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&dsc_cfg)) {
stream->timing.dsc_cfg = dsc_cfg;
stream->timing.flags.DSC = 1;
@@ -5894,12 +5868,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
&dsc_options,
link_bandwidth_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
- timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
max_supported_bw_in_kbps = link_bandwidth_kbps;
dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
@@ -5911,6 +5887,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
&dsc_options,
dsc_max_supported_bw_in_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
@@ -5942,15 +5919,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
- const struct drm_connector_state *con_state =
- dm_state ? &dm_state->base : NULL;
+ const struct drm_connector_state *con_state = &dm_state->base;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode;
struct drm_display_mode saved_mode;
struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
bool recalculate_timing = false;
- bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ bool scale = dm_state->scaling != RMX_OFF;
int mode_refresh;
int preferred_refresh = 0;
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
@@ -6009,12 +5985,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* This may not be an error, the use case is when we have no
* usermode calls to reset and set mode upon hotplug. In this
* case, we call set mode ourselves to restore the previous mode
- * and the modelist may not be filled in in time.
+ * and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing = amdgpu_freesync_vid_mode &&
- is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -6029,13 +6004,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else if (!dm_state)
+ else
drm_mode_set_crtcinfo(&mode, 0);
/*
- * If scaling is enabled and refresh rate didn't change
- * we copy the vic and polarities of the old timings
- */
+ * If scaling is enabled and refresh rate didn't change
+ * we copy the vic and polarities of the old timings
+ */
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
@@ -6070,7 +6045,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- if (stream->link->psr_settings.psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
@@ -6342,6 +6317,31 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
return 0;
}
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+
+ if (!connector->edid_override)
+ return;
+
+ drm_edid_override_connector_update(&aconnector->base);
+ edid = aconnector->base.edid_blob_ptr->data;
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect,
@@ -6352,7 +6352,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
.late_register = amdgpu_dm_connector_late_register,
- .early_unregister = amdgpu_dm_connector_unregister
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
};
static int get_modes(struct drm_connector *connector)
@@ -6369,11 +6370,19 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
struct edid *edid;
if (!aconnector->base.edid_blob_ptr) {
- DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
- aconnector->base.name);
+ /* if connector->edid_override is valid, copy
+ * it into edid_blob_ptr
+ */
- aconnector->base.force = DRM_FORCE_OFF;
- return;
+ drm_edid_override_connector_update(&aconnector->base);
+
+ if (!aconnector->base.edid_blob_ptr) {
+ DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ return;
+ }
}
edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
@@ -6558,7 +6567,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+ stream = create_validate_stream_for_sink(aconnector, mode,
+ to_dm_connector_state(connector->state),
+ NULL);
if (stream) {
dc_stream_release(stream);
result = MODE_OK;
@@ -6652,6 +6663,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->colorspace != old_con_state->colorspace) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
struct dc_info_packet hdr_infopacket;
@@ -6674,7 +6693,7 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
* set is permissible, however. So only force a
* modeset if we're entering or exiting HDR.
*/
- new_crtc_state->mode_changed =
+ new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
!old_con_state->hdr_output_metadata ||
!new_con_state->hdr_output_metadata;
}
@@ -6737,7 +6756,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
- if (!aconnector->mst_output_port || !aconnector->dc_sink)
+ if (!aconnector->mst_output_port)
return 0;
mst_port = aconnector->mst_output_port;
@@ -6755,6 +6774,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
+
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
color_depth = convert_color_depth_from_display_info(connector,
@@ -7073,7 +7093,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *m;
- list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
if (drm_mode_equal(m, mode))
return true;
}
@@ -7163,7 +7183,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!(amdgpu_freesync_vid_mode && edid))
+ if (!edid)
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7199,6 +7219,12 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
return amdgpu_dm_connector->num_modes;
}
+static const u32 supported_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
int connector_type,
@@ -7227,6 +7253,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
/*
* configure support HPD hot plug connector_>polled default value is 0
@@ -7279,6 +7306,15 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.abm_level_property, 0);
}
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ }
+
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -7377,7 +7413,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
- DRM_DEBUG_DRIVER("%s()\n", __func__);
i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
@@ -7750,7 +7785,7 @@ static void update_freesync_state_on_stream(
aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
- if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
if (aconn->vsdb_info.amd_vsdb_version == 1)
@@ -7905,7 +7940,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
- struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -8048,7 +8082,17 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Only allow immediate flips for fast updates that don't
* change memory domain, FB pitch, DCC state, rotation or
* mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
*/
+ if (crtc->state->async_flip &&
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
+
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8091,8 +8135,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* DRI3/Present extension with defined target_msc.
*/
last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
- }
- else {
+ } else {
/* For variable refresh rate mode only:
* Get vblank of last completed flip to avoid > 1 vrr
* flips per video frame by use of throttling, but allow
@@ -8193,6 +8236,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
+ mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+
/*
* If FreeSync state on the stream has changed then we need to
* re-adjust the min/max bounds now that DC doesn't handle this
@@ -8206,10 +8255,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
-
update_planes_and_stream_adapter(dm->dc,
acrtc_state->update_type,
planes_count,
@@ -8376,55 +8421,20 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
bool mode_set_reset_required = false;
- int r;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- r = drm_atomic_helper_wait_for_fences(dev, state, false);
- if (unlikely(r))
- DRM_ERROR("Waiting for fences timed out!");
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
-
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- } else {
- /* No state changes, retain current state. */
- dc_state_temp = dc_create_state(dm->dc);
- ASSERT(dc_state_temp);
- dc_state = dc_state_temp;
- dc_resource_state_copy_construct_current(dm->dc, dc_state);
- }
+ u32 i;
- for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8447,9 +8457,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
drm_dbg_state(state->dev,
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@@ -8522,24 +8530,22 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
- if (dc_state) {
- /* if there mode set or reset, disable eDP PSR */
- if (mode_set_reset_required) {
- if (dm->vblank_control_workqueue)
- flush_workqueue(dm->vblank_control_workqueue);
+ /* if there is a mode set or reset, disable eDP PSR */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
- amdgpu_dm_psr_disable_all(dm);
- }
+ amdgpu_dm_psr_disable_all(dm);
+ }
- dm_enable_per_frame_crtc_master_sync(dc_state);
- mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
- /* Allow idle optimization when vblank count is 0 for display off */
- if (dm->active_vblank_irq_count == 0)
- dc_allow_idle_optimizations(dm->dc, true);
- mutex_unlock(&dm->dc_lock);
- }
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8559,6 +8565,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
@@ -8681,13 +8725,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_surface_update *dummy_updates;
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
@@ -8746,6 +8789,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
@@ -8757,6 +8801,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state->stream,
&stream_update);
mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
}
/**
@@ -8835,8 +8880,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, wait_for_vblank);
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
}
/* Update audio instances for each connector. */
@@ -8871,10 +8915,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- /* return the stolen vga memory back to VRAM */
- if (!adev->mman.keep_stolen_vga_memory)
- amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ /* Don't free the memory if we are hitting this as part of suspend.
+ * This way we don't free any memory during suspend; see
+ * amdgpu_bo_free_kernel(). The memory will be freed in the first
+ * non-suspend modeset or when the driver is torn down.
+ */
+ if (!adev->in_suspend) {
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ }
/*
* Finally, drop a runtime PM reference for each newly disabled CRTC,
@@ -8884,9 +8935,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
-
- if (dc_state_temp)
- dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -9018,8 +9066,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
- "timed out\n", crtc->base.id, crtc->name);
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
}
@@ -9104,7 +9152,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
return false;
}
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
@@ -9208,8 +9257,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (amdgpu_freesync_vid_mode &&
- dm_new_crtc_state->stream &&
+ if (dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -9227,9 +9275,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
goto skip_modeset;
drm_dbg_state(state->dev,
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@@ -9251,27 +9297,27 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
}
/* Now check if we should set freesync video mode */
- if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ if (dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER(
- "Mode change not required for front porch change, "
- "setting mode_changed to %d",
+ "Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (amdgpu_freesync_vid_mode && aconnector &&
+ } else if (aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
high_mode = get_highest_refresh_rate_mode(aconnector, false);
- if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
set_freesync_fixed_config(dm_new_crtc_state);
- }
}
ret = dm_atomic_get_state(state, &dm_state);
@@ -9439,6 +9485,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;
+
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -9537,11 +9584,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
}
/* Core DRM takes care of checking FB modifiers, so we only need to
- * check tiling flags when the FB doesn't have a modifier. */
+ * check tiling flags when the FB doesn't have a modifier.
+ */
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
if (adev->family < AMDGPU_FAMILY_AI) {
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
- AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
} else {
linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9675,8 +9723,8 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
return -EINVAL;
- else
- *is_top_most_overlay = false;
+
+ *is_top_most_overlay = false;
}
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
@@ -9763,12 +9811,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the underlying planes'. */
+ * blending properties match the underlying planes'.
+ */
new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
- if (!new_cursor_state || !new_cursor_state->fb) {
+ if (!new_cursor_state || !new_cursor_state->fb)
return 0;
- }
dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9813,6 +9861,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;
+
for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
if (!conn_state->crtc)
conn_state = old_conn_state;
@@ -10004,6 +10053,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) !=
+ get_mem_type(new_plane_state->fb))
+ lock_and_validation_needed = true;
+
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
new_plane_state,
@@ -10247,13 +10301,24 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
/* Store the overall update type for use later in atomic check. */
- for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
+ /*
+ * Only allow async flips for fast updates that don't change
+ * the FB pitch, the DCC state, rotation, etc.
+ */
+ if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+
dm_new_crtc_state->update_type = lock_and_validation_needed ?
- UPDATE_TYPE_FULL :
- UPDATE_TYPE_FAST;
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
}
/* Must be success */
@@ -10269,7 +10334,7 @@ fail:
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
else
- DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
trace_amdgpu_dm_atomic_check_finish(state, ret);
@@ -10323,7 +10388,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
input->cea_total_length = total_length;
memcpy(input->payload, data, length);
- res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
+ res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
DRM_ERROR("EDID CEA parser failed\n");
return false;
@@ -10425,6 +10490,41 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
return ret;
}
+static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ while (j < EDID_LENGTH) {
+ struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
+ unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
+
+ if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
+ amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
+ vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
+ vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
+ DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
+
+ return true;
+ }
+ j++;
+ }
+
+ return false;
+}
+
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
@@ -10560,6 +10660,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
freesync_capable = true;
}
}
+ parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+
+ if (vsdb_info.replay_mode) {
+ amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
+ amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
+ amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
+ }
+
} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
@@ -10773,3 +10881,13 @@ bool check_seamless_boot_capability(struct amdgpu_device *adev)
return false;
}
+
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+}
+
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 2e2413fd73a4..a2d34be82613 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -51,6 +51,9 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
+#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
+#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
+#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -75,6 +78,12 @@ struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct amd_vsdb_block {
+ unsigned char ieee_id[3];
+ unsigned char version;
+ unsigned char feature_caps;
+};
+
struct common_irq_params {
struct amdgpu_device *adev;
enum dc_irq_source irq_src;
@@ -195,6 +204,11 @@ struct hpd_rx_irq_offload_work_queue {
*/
bool is_handling_link_loss;
/**
+ * @is_handling_mst_msg_rdy_event: Used to prevent scheduling another MST
+ * message ready event while one is already being handled
+ */
+ bool is_handling_mst_msg_rdy_event;
+ /**
* @aconnector: The aconnector that this work queue is attached to
*/
struct amdgpu_dm_connector *aconnector;
@@ -604,6 +618,11 @@ struct amdgpu_hdmi_vsdb_info {
* @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
*/
unsigned int max_refresh_rate_hz;
+
+ /**
+ * @replay_mode: Replay Mode supported
+ */
+ bool replay_mode;
};
struct amdgpu_dm_connector {
@@ -638,6 +657,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ struct mutex handle_mst_msg_ready;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -661,10 +682,6 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
-#ifdef CONFIG_DEBUG_FS
- uint32_t debugfs_dpcd_address;
- uint32_t debugfs_dpcd_size;
-#endif
bool force_yuv420_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 27711743c22c..52ecfa746b54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -83,12 +83,15 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
+static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
struct drm_device *drm_dev = crtc->dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool was_activated;
spin_lock_irq(&drm_dev->event_lock);
+ was_activated = acrtc->dm_irq_params.window_param.activated;
acrtc->dm_irq_params.window_param.x_start = 0;
acrtc->dm_irq_params.window_param.y_start = 0;
acrtc->dm_irq_params.window_param.x_end = 0;
@@ -97,6 +100,14 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
acrtc->dm_irq_params.window_param.update_win = false;
acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
spin_unlock_irq(&drm_dev->event_lock);
+
+ /* Disable secure_display if it was enabled */
+ if (was_activated) {
+ /* stop ROI update on this crtc */
+ flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
+ dc_stream_forward_crc_window(stream, NULL, true);
+ }
}
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
@@ -112,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
crtc = secure_display_ctx->crtc;
- if (!crtc) {
+ if (!crtc)
return;
- }
psp = &drm_to_adev(crtc->dev)->psp;
@@ -140,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
if (!ret) {
- if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
- }
}
mutex_unlock(&psp->securedisplay_context.mutex);
@@ -204,9 +213,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
{
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- int i;
-#endif
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
@@ -220,19 +226,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- /* Disable secure_display if it was enabled */
- if (!enable) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
- /* stop ROI update on this crtc */
- flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
- flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
- dc_stream_forward_crc_window(stream_state, NULL, true);
- }
- }
- }
-#endif
if (!dc_stream_configure_crc(stream_state->ctx->dc,
stream_state, NULL, enable, enable)) {
ret = -EINVAL;
@@ -363,7 +356,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
- amdgpu_dm_set_crc_window_default(crtc);
+ amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif
if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 935adca6f048..748e80ef40d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -100,7 +100,7 @@ struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
-#define amdgpu_dm_crtc_secure_display_create_contexts()
+#define amdgpu_dm_crtc_secure_display_create_contexts(x)
#endif
#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index e3762e806617..30d4c6fd95f5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work)
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
- enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
@@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
if (rc)
return rc;
- if (amdgpu_in_reset(adev)) {
- irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
- /* During gpu-reset we disable and then enable vblank irq, so
- * don't use amdgpu_irq_get/put() to avoid refcount change.
- */
- if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- rc = -EBUSY;
- } else {
- rc = (enable)
- ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
- : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
- }
+ rc = (enable)
+ ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+ : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
if (rc)
return rc;
@@ -408,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ /*
+ * Only allow async flips for fast updates that don't change the FB
+ * pitch, the DCC state, rotation, etc.
+ */
+ if (crtc_state->async_flip &&
+ dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 827fcb4fb3b3..7c21e21bcc51 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -336,6 +336,153 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
return size;
}
+static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector)
+{
+ bool is_end_device = false;
+ struct drm_dp_mst_topology_mgr *mgr = NULL;
+ struct drm_dp_mst_port *port = NULL;
+
+ if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) {
+ mgr = &aconnector->mst_root->mst_mgr;
+ port = aconnector->mst_output_port;
+
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
+ port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV)
+ is_end_device = true;
+ drm_modeset_unlock(&mgr->base.lock);
+ }
+
+ return is_end_device;
+}
+
+/* Change MST link setting
+ *
+ * valid lane count value: 1, 2, 4
+ * valid link rate value:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ * 3E8h = 10.0Gbps per lane
+ * 546h = 13.5Gbps per lane
+ * 7D0h = 20.0Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings
+ *
+ * for example, to force 2 lanes at 10.0Gbps,
+ * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings
+ *
+ * A valid input triggers a hotplug event so the new link setting gets applied
+ * An invalid input resets the preferred training settings
+ *
+ * Usage mirrors the existing link_settings entry
+ *
+ */
+static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ struct dc *dc = (struct dc *)link->dc;
+ struct dc_link_settings prefer_link_settings;
+ char *wr_buf = NULL;
+ const uint32_t wr_buf_size = 40;
+ /* 0: lane_count; 1: link_rate */
+ int max_param_num = 2;
+ uint8_t param_nums = 0;
+ long param[2];
+ bool valid_input = true;
+
+ if (!dp_mst_is_end_device(aconnector))
+ return -EINVAL;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOSPC;
+
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return -EINVAL;
+ }
+
+ switch (param[0]) {
+ case LANE_COUNT_ONE:
+ case LANE_COUNT_TWO:
+ case LANE_COUNT_FOUR:
+ break;
+ default:
+ valid_input = false;
+ break;
+ }
+
+ switch (param[1]) {
+ case LINK_RATE_LOW:
+ case LINK_RATE_HIGH:
+ case LINK_RATE_RBR2:
+ case LINK_RATE_HIGH2:
+ case LINK_RATE_HIGH3:
+ case LINK_RATE_UHBR10:
+ case LINK_RATE_UHBR13_5:
+ case LINK_RATE_UHBR20:
+ break;
+ default:
+ valid_input = false;
+ break;
+ }
+
+ if (!valid_input) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
+ mutex_unlock(&adev->dm.dc_lock);
+ return -EINVAL;
+ }
+
+ /* save user-forced lane_count and link_rate to preferred settings;
+ * spread spectrum will not be changed
+ */
+ prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.use_link_rate_set = false;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ /* skip immediate retrain; train to the new link setting after the hotplug event is triggered */
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ mutex_lock(&aconnector->base.dev->mode_config.mutex);
+ aconnector->base.force = DRM_FORCE_OFF;
+ mutex_unlock(&aconnector->base.dev->mode_config.mutex);
+ drm_kms_helper_hotplug_event(aconnector->base.dev);
+
+ msleep(100);
+
+ mutex_lock(&aconnector->base.dev->mode_config.mutex);
+ aconnector->base.force = DRM_FORCE_UNSPECIFIED;
+ mutex_unlock(&aconnector->base.dev->mode_config.mutex);
+ drm_kms_helper_hotplug_event(aconnector->base.dev);
+
+ kfree(wr_buf);
+ return size;
+}
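
A minimal userspace sketch (not part of the patch) of driving the new entry, assuming a connector named DP-1 (the actual DP-x directory depends on the topology); it forces the 2-lane, UHBR10 (0x3e8) setting used as the example in the comment above.

/* mst_link_settings write sketch -- illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* DP-1 is an assumed connector name; substitute the DP-x dir on your system */
	const char *node = "/sys/kernel/debug/dri/0/DP-1/mst_link_settings";
	const char *req = "2 0x3e8";	/* lane_count link_rate (UHBR10) */
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, req, strlen(req)) < 0)
		perror("write");
	close(fd);
	return 0;
}
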
+
/* function: get current DP PHY settings: voltage swing, pre-emphasis,
* post-cursor2 (defined by VESA DP specification)
*
@@ -907,6 +1054,61 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
/*
+ * Returns the current colorspace for the crtc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace
+ */
+static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+ struct drm_device *dev = crtc->dev;
+ struct dm_crtc_state *dm_crtc_state = NULL;
+ int res = -ENODEV;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state == NULL)
+ goto unlock;
+
+ dm_crtc_state = to_dm_crtc_state(crtc->state);
+ if (dm_crtc_state->stream == NULL)
+ goto unlock;
+
+ switch (dm_crtc_state->stream->output_color_space) {
+ case COLOR_SPACE_SRGB:
+ seq_puts(m, "sRGB");
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ seq_puts(m, "BT601_YCC");
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ seq_puts(m, "BT709_YCC");
+ break;
+ case COLOR_SPACE_ADOBERGB:
+ seq_puts(m, "opRGB");
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ seq_puts(m, "BT2020_RGB");
+ break;
+ case COLOR_SPACE_2020_YCBCR:
+ seq_puts(m, "BT2020_YCC");
+ break;
+ default:
+ goto unlock;
+ }
+ res = 0;
+
+unlock:
+ drm_modeset_unlock(&crtc->mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return res;
+}
+DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace);
+
+/*
* Example usage:
 * Disable dsc passthrough, i.e., have dsc decoding at the converter, not the external RX
 * echo 1 > /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
@@ -1039,88 +1241,6 @@ static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *b
return write_size;
}
-static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- int r;
- struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
-
- if (size < sizeof(connector->debugfs_dpcd_address))
- return -EINVAL;
-
- r = copy_from_user(&connector->debugfs_dpcd_address,
- buf, sizeof(connector->debugfs_dpcd_address));
-
- return size - r;
-}
-
-static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- int r;
- struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
-
- if (size < sizeof(connector->debugfs_dpcd_size))
- return -EINVAL;
-
- r = copy_from_user(&connector->debugfs_dpcd_size,
- buf, sizeof(connector->debugfs_dpcd_size));
-
- if (connector->debugfs_dpcd_size > 256)
- connector->debugfs_dpcd_size = 0;
-
- return size - r;
-}
-
-static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- int r;
- char *data;
- struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
- struct dc_link *link = connector->dc_link;
- uint32_t write_size = connector->debugfs_dpcd_size;
-
- if (!write_size || size < write_size)
- return -EINVAL;
-
- data = kzalloc(write_size, GFP_KERNEL);
- if (!data)
- return 0;
-
- r = copy_from_user(data, buf, write_size);
-
- dm_helpers_dp_write_dpcd(link->ctx, link,
- connector->debugfs_dpcd_address, data, write_size - r);
- kfree(data);
- return write_size - r;
-}
-
-static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- int r;
- char *data;
- struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
- struct dc_link *link = connector->dc_link;
- uint32_t read_size = connector->debugfs_dpcd_size;
-
- if (!read_size || size < read_size)
- return 0;
-
- data = kzalloc(read_size, GFP_KERNEL);
- if (!data)
- return 0;
-
- dm_helpers_dp_read_dpcd(link->ctx, link,
- connector->debugfs_dpcd_address, data, read_size);
-
- r = copy_to_user(buf, data, read_size);
-
- kfree(data);
- return read_size - r;
-}
-
/* function: Read link's DSC & FEC capabilities
*
*
@@ -2682,25 +2802,6 @@ static const struct file_operations sdp_message_fops = {
.llseek = default_llseek
};
-static const struct file_operations dp_dpcd_address_debugfs_fops = {
- .owner = THIS_MODULE,
- .write = dp_dpcd_address_write,
- .llseek = default_llseek
-};
-
-static const struct file_operations dp_dpcd_size_debugfs_fops = {
- .owner = THIS_MODULE,
- .write = dp_dpcd_size_write,
- .llseek = default_llseek
-};
-
-static const struct file_operations dp_dpcd_data_debugfs_fops = {
- .owner = THIS_MODULE,
- .read = dp_dpcd_data_read,
- .write = dp_dpcd_data_write,
- .llseek = default_llseek
-};
-
static const struct file_operations dp_max_bpc_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_max_bpc_read,
@@ -2714,6 +2815,12 @@ static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = {
.llseek = default_llseek
};
+static const struct file_operations dp_mst_link_settings_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_mst_link_setting,
+ .llseek = default_llseek
+};
+
static const struct {
char *name;
const struct file_operations *fops;
@@ -2724,9 +2831,6 @@ static const struct {
{"test_pattern", &dp_phy_test_pattern_fops},
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
{"sdp_message", &sdp_message_fops},
- {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
- {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
- {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
{"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
{"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
{"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
@@ -2740,7 +2844,8 @@ static const struct {
{"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops},
{"is_mst_connector", &dp_is_mst_connector_fops},
{"mst_progress_status", &dp_mst_progress_status_fops},
- {"is_dpia_link", &is_dpia_link_fops}
+ {"is_dpia_link", &is_dpia_link_fops},
+ {"mst_link_settings", &dp_mst_link_settings_debugfs_fops}
};
static const struct {
@@ -2809,6 +2914,32 @@ static int psr_read_residency(void *data, u64 *val)
return 0;
}
+/* read allow_edp_hotplug_detection */
+static int allow_edp_hotplug_detection_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ *val = adev->dm.dc->config.allow_edp_hotplug_detection;
+
+ return 0;
+}
+
+/* set allow_edp_hotplug_detection */
+static int allow_edp_hotplug_detection_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *aconnector = data;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ adev->dm.dc->config.allow_edp_hotplug_detection = (uint32_t) val;
+
+ return 0;
+}
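
A hedged userspace sketch of a get/set round trip through the new attribute; the eDP-1 directory name is an assumption, the node itself is created in the connector's debugfs directory by connector_debugfs_init() later in this patch.

/* allow_edp_hotplug_detection round trip -- illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* eDP-1 is an assumed connector name */
	const char *node = "/sys/kernel/debug/dri/0/eDP-1/allow_edp_hotplug_detection";
	char val[16] = {0};
	int fd;

	fd = open(node, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	read(fd, val, sizeof(val) - 1);	/* attribute prints as "%llu\n" */
	printf("current: %s", val);
	close(fd);

	fd = open(node, O_WRONLY);
	if (fd >= 0) {
		write(fd, "1", 1);	/* enable eDP hotplug detection */
		close(fd);
	}
	return 0;
}
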
+
/*
* Set dmcub trace event IRQ enable or disable.
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
@@ -2847,6 +2978,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops,
+ allow_edp_hotplug_detection_get,
+ allow_edp_hotplug_detection_set, "%llu\n");
+
DEFINE_SHOW_ATTRIBUTE(current_backlight);
DEFINE_SHOW_ATTRIBUTE(target_backlight);
@@ -2887,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused)
seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
}
} else {
- seq_printf(m, "ILR is not supported by this eDP panel.\n");
+ seq_puts(m, "ILR is not supported by this eDP panel.\n");
}
return 0;
@@ -3017,6 +3152,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
&target_backlight_fops);
debugfs_create_file("ilr_setting", 0644, dir, connector,
&edp_ilr_debugfs_fops);
+ debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector,
+ &allow_edp_hotplug_detection_fops);
}
for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
@@ -3025,9 +3162,6 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector_debugfs_entries[i].fops);
}
- connector->debugfs_dpcd_address = 0;
- connector->debugfs_dpcd_size = 0;
-
if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
debugfs_create_file(hdmi_debugfs_entries[i].name,
@@ -3246,6 +3380,8 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
+ debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry,
+ crtc, &amdgpu_current_colorspace_fops);
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5536d17306d0..20cfc5be21a4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -39,10 +39,10 @@
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
-
struct dc_link *link = handle;
struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
- struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW,
+ link->dc->caps.i2c_speed_in_khz};
return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}
@@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3
{
struct dc_link *link = handle;
- struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
- struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset},
+ {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW,
+ link->dc->caps.i2c_speed_in_khz};
return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}
@@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
-
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.context.initialized) {
@@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
-
return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
}
-static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+static int psp_set_srm(struct psp_context *psp,
+ u8 *srm, uint32_t srm_size, uint32_t *srm_version)
{
-
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.context.initialized) {
@@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
return -EINVAL;
@@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
static void link_lock(struct hdcp_workqueue *work, bool lock)
{
-
int i = 0;
for (i = 0; i < work->max_link; i++) {
@@ -160,66 +160,60 @@ static void link_lock(struct hdcp_workqueue *work, bool lock)
mutex_unlock(&work[i].mutex);
}
}
+
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
- uint8_t content_type,
+ u8 content_type,
bool enable_encryption)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
- struct mod_hdcp_display *display = &hdcp_work[link_index].display;
- struct mod_hdcp_link *link = &hdcp_work[link_index].link;
- struct mod_hdcp_display_query query;
+ struct mod_hdcp_link_adjustment link_adjust;
+ struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
- query.display = NULL;
- mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
-
- if (query.display != NULL) {
- memcpy(display, query.display, sizeof(struct mod_hdcp_display));
- mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
- hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
-
- if (enable_encryption) {
- /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp
- * (s3 resume case)
- */
- if (hdcp_work->srm_size > 0)
- psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
- &hdcp_work->srm_version);
-
- display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
- if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
- hdcp_w->link.adjust.hdcp1.disable = 0;
- hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
- } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
- hdcp_w->link.adjust.hdcp1.disable = 1;
- hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
- }
+ memset(&link_adjust, 0, sizeof(link_adjust));
+ memset(&display_adjust, 0, sizeof(display_adjust));
- schedule_delayed_work(&hdcp_w->property_validate_dwork,
- msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
- } else {
- display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
- cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ if (enable_encryption) {
+ /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp
+ * (s3 resume case)
+ */
+ if (hdcp_work->srm_size > 0)
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm,
+ hdcp_work->srm_size,
+ &hdcp_work->srm_version);
+
+ display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
+
+ link_adjust.auth_delay = 2;
+
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
+ link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
+ } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
+ link_adjust.hdcp1.disable = 1;
+ link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
}
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ schedule_delayed_work(&hdcp_w->property_validate_dwork,
+ msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+ } else {
+ display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
+ hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
}
- mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+ mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
- unsigned int link_index,
+ unsigned int link_index,
struct amdgpu_dm_connector *aconnector)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -238,7 +232,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
- aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms);
+ aconnector->base.index, conn_state->hdcp_content_type,
+ aconnector->base.dpms);
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
@@ -246,6 +241,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
+
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -274,15 +270,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index
schedule_work(&hdcp_w->cpirq_work);
}
-
-
-
static void event_callback(struct work_struct *work)
{
struct hdcp_workqueue *hdcp_work;
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
- callback_dwork);
+ callback_dwork);
mutex_lock(&hdcp_work->mutex);
@@ -294,13 +287,12 @@ static void event_callback(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
-
}
static void event_property_update(struct work_struct *work)
{
- struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
+ property_update_work);
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_device *dev;
long ret;
@@ -334,11 +326,10 @@ static void event_property_update(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
if (conn_state->commit) {
- ret = wait_for_completion_interruptible_timeout(
- &conn_state->commit->hw_done, 10 * HZ);
+ ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
+ 10 * HZ);
if (ret == 0) {
- DRM_ERROR(
- "HDCP state unknown! Setting it to DESIRED");
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
hdcp_work->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
}
@@ -349,24 +340,20 @@ static void event_property_update(struct work_struct *work)
DRM_MODE_HDCP_CONTENT_TYPE0 &&
hdcp_work->encryption_status[conn_index] <=
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
-
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
- drm_hdcp_update_content_protection(
- connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED);
} else if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE1 &&
hdcp_work->encryption_status[conn_index] ==
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
- drm_hdcp_update_content_protection(
- connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED);
}
} else {
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
- drm_hdcp_update_content_protection(
- connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
-
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -402,7 +389,7 @@ static void event_property_validate(struct work_struct *work)
&query);
DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
- aconnector->base.index,
+ aconnector->base.index,
aconnector->base.state->content_protection,
query.encryption_status,
hdcp_work->encryption_status[conn_index]);
@@ -410,7 +397,8 @@ static void event_property_validate(struct work_struct *work)
if (query.encryption_status !=
hdcp_work->encryption_status[conn_index]) {
DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
- hdcp_work->encryption_status[conn_index], query.encryption_status);
+ hdcp_work->encryption_status[conn_index],
+ query.encryption_status);
hdcp_work->encryption_status[conn_index] =
query.encryption_status;
@@ -429,7 +417,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue *hdcp_work;
hdcp_work = container_of(to_delayed_work(work),
- struct hdcp_workqueue,
+ struct hdcp_workqueue,
watchdog_timer_dwork);
mutex_lock(&hdcp_work->mutex);
@@ -443,7 +431,6 @@ static void event_watchdog_timer(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
}
static void event_cpirq(struct work_struct *work)
@@ -459,10 +446,8 @@ static void event_cpirq(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
}
-
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
int i = 0;
@@ -478,10 +463,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
kfree(hdcp_work);
}
-
static bool enable_assr(void *handle, struct dc_link *link)
{
-
struct hdcp_workqueue *hdcp_work = handle;
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
@@ -499,7 +482,8 @@ static bool enable_assr(void *handle, struct dc_link *link)
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
- dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+ dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index =
+ link->link_enc_hw_inst;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
@@ -521,7 +505,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
int link_index = aconnector->dc_link->link_index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
- struct drm_connector_state *conn_state;
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
@@ -541,7 +525,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
else if (aconnector->dc_em_sink)
sink = aconnector->dc_em_sink;
- if (sink != NULL)
+ if (sink)
link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
@@ -564,19 +548,27 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
link->adjust.hdcp1.disable = 0;
- conn_state = aconnector->base.state;
+ hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
- (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
- (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);
+ (!!aconnector->base.state) ?
+ aconnector->base.state->content_protection : -1,
+ (!!aconnector->base.state) ?
+ aconnector->base.state->hdcp_content_type : -1);
- if (conn_state)
- hdcp_update_display(hdcp_work, link_index, aconnector,
- conn_state->hdcp_content_type, false);
-}
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+ process_output(hdcp_w);
+ mutex_unlock(&hdcp_w->mutex);
+}
-/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel
+/**
+ * DOC: Add sysfs interface for set/get srm
+ *
+ * NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
* will automatically call once or twice depending on the size
*
* call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
@@ -587,23 +579,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on
* the last call we will send the full SRM. PSP will fail on every call before the last.
*
- * This means we don't know if the SRM is good until the last call. And because of this limitation we
- * cannot throw errors early as it will stop the kernel from writing to sysfs
+ * This means we don't know if the SRM is good until the last call. And because of this
+ * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs
*
* Example 1:
- * Good SRM size = 5096
- * first call to write 4096 -> PSP fails
- * Second call to write 1000 -> PSP Pass -> SRM is set
+ * Good SRM size = 5096
+ * first call to write 4096 -> PSP fails
+ * Second call to write 1000 -> PSP Pass -> SRM is set
*
* Example 2:
- * Bad SRM size = 4096
- * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
- * is the last call)
+ * Bad SRM size = 4096
+ * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
+ * is the last call)
*
* Solution?:
- * 1: Parse the SRM? -> It is signed so we don't know the EOF
- * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
- * below
+ * 1: Parse the SRM? -> It is signed so we don't know the EOF
+ * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
+ * below
*
* Easy Solution:
* Always call get after Set to verify if set was successful.
@@ -612,20 +604,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* +----------------------+
 * PSP will only update its srm if it is older than the one we are trying to load.
 * Always do set first, then get.
- * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer
- * version and save it
+ * -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer
+ * version and save it
*
- * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
- * same(newer) version back and save it
+ * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
+ * same(newer) version back and save it
*
+ * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is
- * incorrect/corrupted and we should correct our SRM by getting it from PSP
+ * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
+ * incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
-static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
- uint32_t srm_version = 0;
+ u32 srm_version = 0;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
link_lock(work, true);
@@ -639,19 +632,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi
work->srm_version = srm_version;
}
-
link_lock(work, false);
return count;
}
-static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
- uint8_t *srm = NULL;
- uint32_t srm_version;
- uint32_t srm_size;
+ u8 *srm = NULL;
+ u32 srm_version;
+ u32 srm_size;
size_t ret = count;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
@@ -684,12 +677,12 @@ ret:
/* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory.
*
* For example,
- * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
- * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
- * across boot/reboots/suspend/resume/shutdown
+ * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
+ * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
+ * across boot/reboots/suspend/resume/shutdown
*
- * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
- * to make the SRM persistent.
+ * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
+ * we need to make the SRM persistent.
*
* -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory.
* -The kernel cannot write to the file systems.
@@ -699,8 +692,8 @@ ret:
*
* Usermode can read/write to/from PSP using the sysfs interface
* For example:
- * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
- * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
*/
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
@@ -709,10 +702,9 @@ static const struct bin_attribute data_attr = {
.read = srm_data_read,
};
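
A minimal userspace sketch of the set-then-get verification described in the comments above: it streams an SRM blob into the hdcp_srm node and reads it back so the caller can compare what PSP accepted. Error handling is trimmed and the single-read/single-write I/O is a simplification.

/* hdcp_srm set-then-get sketch -- illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *node = "/sys/class/drm/card0/device/hdcp_srm";
	char buf[8192];
	ssize_t n, back;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <srm file>\n", argv[0]);
		return 1;
	}

	/* 1. SET: push the SRM file into PSP through the sysfs node */
	fd = open(argv[1], O_RDONLY);
	n = read(fd, buf, sizeof(buf));
	close(fd);

	fd = open(node, O_WRONLY);
	write(fd, buf, n);
	close(fd);

	/* 2. GET: read back what PSP now holds and compare the sizes */
	fd = open(node, O_RDONLY);
	back = read(fd, buf, sizeof(buf));
	close(fd);

	printf("wrote %zd bytes, PSP reports %zd bytes\n", n, back);
	return 0;
}
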
-
-struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
+ struct cp_psp *cp_psp, struct dc *dc)
{
-
int max_caps = dc->caps.max_links;
struct hdcp_workqueue *hdcp_work;
int i = 0;
@@ -721,14 +713,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
if (ZERO_OR_NULL_PTR(hdcp_work))
return NULL;
- hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+ sizeof(*hdcp_work->srm), GFP_KERNEL);
- if (hdcp_work->srm == NULL)
+ if (!hdcp_work->srm)
goto fail_alloc_context;
- hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+ sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
- if (hdcp_work->srm_temp == NULL)
+ if (!hdcp_work->srm_temp)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
@@ -781,10 +775,5 @@ fail_alloc_context:
kfree(hdcp_work);
return NULL;
-
-
-
}
-
-
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c6ce2b7123b7..4b230933b28e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -44,18 +44,39 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
-/* MST Dock */
-static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
+static u32 edid_extract_panel_id(struct edid *edid)
+{
+ return (u32)edid->mfg_id[0] << 24 |
+ (u32)edid->mfg_id[1] << 16 |
+ (u32)EDID_PRODUCT_ID(edid);
+}
-/* dm_helpers_parse_edid_caps
- *
- * Parse edid caps
+static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+{
+ uint32_t panel_id = edid_extract_panel_id(edid);
+
+ switch (panel_id) {
+ /* Workaround for some monitors that do not work well with FAMS */
+ case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
+ case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
+ case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
+ DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+ edid_caps->panel_patch.disable_fams = true;
+ break;
+ default:
+ return;
+ }
+}
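
An illustrative, standalone sketch (not part of the patch) of how the quirk-table keys line up with the value built by edid_extract_panel_id() above: the helper below reproduces the bit packing of the DRM drm_edid_encode_panel_id() macro, three 5-bit PNP letters followed by the 16-bit product code, so 'S', 'A', 'M', 0x0E5E comes out as 0x4C2D0E5E.

/* Panel-id packing sketch -- illustrative only */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_panel_id(char a, char b, char c, uint16_t product)
{
	/* Each PNP letter is 5 bits with 'A' == 1, packed into bits 30..16 */
	return ((uint32_t)(a - '@') & 0x1f) << 26 |
	       ((uint32_t)(b - '@') & 0x1f) << 21 |
	       ((uint32_t)(c - '@') & 0x1f) << 16 |
	       product;
}

int main(void)
{
	/* One of the FAMS-quirked Samsung panels from the table above */
	printf("0x%08" PRIX32 "\n", encode_panel_id('S', 'A', 'M', 0x0E5E));
	return 0;
}
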
+
+/**
+ * dm_helpers_parse_edid_caps() - Parse edid caps
*
+ * @link: current detected link
* @edid: [in] pointer to edid
- * edid_caps: [in] pointer to edid caps
- * @return
- * void
- * */
+ * @edid_caps: [in] pointer to edid caps
+ *
+ * Return: void
+ */
enum dc_edid_status dm_helpers_parse_edid_caps(
struct dc_link *link,
const struct dc_edid *edid,
@@ -96,7 +117,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
if (sad_count <= 0)
return result;
- edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
+ edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
struct cea_sad *sad = &sads[i];
@@ -118,6 +139,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+ apply_edid_quirks(edid_buf, edid_caps);
+
kfree(sads);
kfree(sadb);
@@ -232,7 +255,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
/* Accessing the connector state is required for vcpi_slots allocation
* and directly relies on behaviour in commit check
* that blocks before commit guaranteeing that the state
- * is not gonna be swapped while still in use in commit tail */
+ * is not going to be swapped while still in use in commit tail
+ */
if (!aconnector || !aconnector->mst_root)
return false;
@@ -259,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 /* mst_mgr->payloads are VC payloads that notify the MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
- * sequence. copy DRM MST allocation to dc */
+ * sequence. Copy the DRM MST allocation to dc
+ */
fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
@@ -403,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx,
total = log_ctx->pos + n + 1;
if (total > log_ctx->size) {
- char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
+ char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);
if (buf) {
memcpy(buf, log_ctx->buf, log_ctx->pos);
@@ -610,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
if (ret < 0) {
- DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
+ DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
return false;
}
@@ -632,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
}
- DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);
+ DC_LOG_DC("%s: success = %d\n", __func__, success);
return success;
}
@@ -641,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
unsigned char data[16] = {0};
- DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
+ DC_LOG_DC("Start %s\n", __func__);
// Step 2
data[0] = 'P';
@@ -699,9 +724,12 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
return;
- DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
+ DC_LOG_DC("Done %s\n", __func__);
}
+/* MST Dock */
+static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
+
static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
struct drm_dp_aux *aux,
const struct dc_stream_state *stream,
@@ -885,10 +913,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
+ if (link->aux_mode) {
+ union test_request test_request = {0};
+ union test_response test_response = {0};
- /* DP Compliance Test 4.2.2.3 */
- if (link->aux_mode)
- drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+
+ if (!test_request.bits.EDID_READ)
+ return edid_status;
+
+ test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_EDID_CHECKSUM,
+ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+ 1);
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+
+ }
return edid_status;
}
@@ -945,9 +997,8 @@ void dm_helpers_override_panel_settings(
struct dc_panel_config *panel_config)
{
// Feature DSC
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
panel_config->dsc.disable_dsc_edp = true;
- }
}
void *dm_helpers_allocate_gpu_mem(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 19f543ba7205..51467f132c26 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work)
/* Call a DAL subcomponent which registered for interrupt notification
* at INTERRUPT_LOW_IRQ_CONTEXT.
- * (The most common use is HPD interrupt) */
+ * (The most common use is HPD interrupt)
+ */
}
/*
@@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
if (handler_removed == false) {
/* Not necessarily an error - caller may not
- * know the context. */
+ * know the context.
+ */
return NULL;
}
@@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params,
static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
irq_handler_idx handler_idx)
{
- if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
return false;
}
@@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
/* This pointer will be stored by code which requested interrupt
* registration.
* The same pointer will be needed in order to unregister the
- * interrupt. */
+ * interrupt.
+ */
DRM_DEBUG_KMS(
"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
@@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
if (handler_list == NULL) {
/* If we got here, it means we searched all irq contexts
- * for this irq source, but the handler was not found. */
+ * for this irq source, but the handler was not found.
+ */
DRM_ERROR(
"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
ih, irq_source);
@@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
/* The handler was removed from the table,
* it means it is safe to flush all the 'work'
- * (because no code can schedule a new one). */
+ * (because no code can schedule a new one).
+ */
lh = &adev->dm.irq_handler_list_low_tab[src];
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
@@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
if (!list_empty(hnd_list_l)) {
- list_for_each_safe (entry, tmp, hnd_list_l) {
+ list_for_each_safe(entry, tmp, hnd_list_l) {
handler = list_entry(
entry,
struct amdgpu_dm_irq_handler_data,
@@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
if (list_empty(handler_list))
return;
- list_for_each_entry (handler_data, handler_list, list) {
+ list_for_each_entry(handler_data, handler_list, list) {
if (queue_work(system_highpri_wq, &handler_data->work)) {
work_queued = true;
break;
@@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
&adev->dm.irq_handler_list_high_tab[irq_source],
list) {
/* Call a subcomponent which registered for immediate
- * interrupt notification */
+ * interrupt notification
+ */
handler_data->handler(handler_data->handler_arg);
}
@@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
return 0;
}
-static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
switch (type) {
case AMDGPU_HPD_1:
@@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
@@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
static inline int dm_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state,
const enum irq_type dal_irq_type,
const char *func)
@@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
@@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
@@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd,
true);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd_rx,
true);
@@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd,
false);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd_rx,
false);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 810ab682f424..57230661132b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -45,8 +45,7 @@
#endif
#include "dc/dcn20/dcn20_resource.h"
-bool is_timing_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream);
+
#define PEAK_FACTOR_X1000 1006
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
@@ -297,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector->edid) {
struct edid *edid;
+
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
if (!edid) {
@@ -620,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return connector;
}
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ uint8_t dpcd_bytes_to_read;
+ const uint8_t max_process_count = 30;
+ uint8_t process_count = 0;
+ u8 retry;
+ struct amdgpu_dm_connector *aconnector =
+ container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ mutex_lock(&aconnector->handle_mst_msg_ready);
+
+ while (process_count < max_process_count) {
+ u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+ process_count++;
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ if (dret != dpcd_bytes_to_read) {
+ DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+ switch (msg_rdy_type) {
+ case DOWN_REP_MSG_RDY_EVENT:
+ /* Only handle DOWN_REP_MSG_RDY case */
+ esi[1] &= DP_DOWN_REP_MSG_RDY;
+ break;
+ case UP_REQ_MSG_RDY_EVENT:
+ /* Only handle UP_REQ_MSG_RDY case */
+ esi[1] &= DP_UP_REQ_MSG_RDY;
+ break;
+ default:
+ /* Handle both cases */
+ esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+ break;
+ }
+
+ if (!esi[1])
+ break;
+
+ /* handle MST irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+ esi,
+ ack,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify downstream */
+ for (retry = 0; retry < 3; retry++) {
+ ssize_t wret;
+
+ wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ ack[1]);
+ if (wret == 1)
+ break;
+ }
+
+ if (retry == 3) {
+ DRM_ERROR("Failed to ack MST event.\n");
+ break;
+ }
+
+ drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+ dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
+ .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
@@ -718,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
&dsc_options,
0,
params[i].timing,
+ dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
&params[i].timing->dsc_cfg)) {
params[i].timing->flags.DSC = 1;
@@ -768,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
&dsc_options,
- (int) kbps, param.timing, &dsc_config);
+ (int) kbps, param.timing,
+ dc_link_get_highest_encoding_format(param.aconnector->dc_link),
+ &dsc_config);
return dsc_config.bits_per_pixel;
}
@@ -1006,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
dsc_policy.min_target_bpp * 16,
dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, &params[count].bw_range))
- params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ &stream->timing,
+ dc_link_get_highest_encoding_format(dc_link),
+ &params[count].bw_range))
+ params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(dc_link));
count++;
}
@@ -1211,7 +1327,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
- if (!res_pool->funcs->remove_stream_from_ctx ||
+ if (res_pool->funcs->remove_stream_from_ctx &&
res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return -EINVAL;
@@ -1422,7 +1538,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
struct dc_stream_state *stream = dm_state->context->streams[i];
if (local_dc_state->streams[i] &&
- is_timing_changed(stream, local_dc_state->streams[i])) {
+ dc_is_timing_changed(stream, local_dc_state->streams[i])) {
DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
} else {
int ind = find_crtc_index_in_state_by_stream(state, stream);
@@ -1467,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
dsc_policy.min_target_bpp * 16,
dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, bw_range);
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 1e4ede1e57ab..37c820ab0fdb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+enum mst_msg_ready_type {
+ NONE_MSG_RDY_EVENT = 0,
+ DOWN_REP_MSG_RDY_EVENT = 1,
+ UP_REQ_MSG_RDY_EVENT = 2,
+ DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type);
+
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 322668973747..8eeca160d434 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
+ DRM_FORMAT_ARGB16161616F,
};
uint32_t format = plane_state->fb->format->format;
unsigned int i;
@@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier)
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev,
int pkrs = 0;
u32 gb_addr_config;
u8 i = 0;
- unsigned swizzle_r_x;
+ unsigned int swizzle_r_x;
uint64_t modifier_r_x;
uint64_t modifier_dcc_best;
uint64_t modifier_dcc_4k;
@@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane,
* caps list.
*/
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
+ (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
if (num_formats >= max_formats)
break;
@@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
}
- break;
+ } else {
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
- case DRM_PLANE_TYPE_OVERLAY:
- for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
- if (num_formats >= max_formats)
- break;
+ formats[num_formats++] = overlay_formats[i];
+ }
+ break;
- formats[num_formats++] = overlay_formats[i];
- }
- break;
+ case DRM_PLANE_TYPE_CURSOR:
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
- case DRM_PLANE_TYPE_CURSOR:
- for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
- if (num_formats >= max_formats)
- break;
+ formats[num_formats++] = cursor_formats[i];
+ }
+ break;
- formats[num_formats++] = cursor_formats[i];
+ default:
+ break;
}
- break;
}
return num_formats;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 75284e2cec74..848c5b4bb301 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type(
if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
/* This clock is higher the validation clock.
* Than means the previous one is the highest
- * non-boosted one. */
+ * non-boosted one.
+ */
DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
dc_clks->num_levels, i);
dc_clks->num_levels = i > 0 ? i : 1;
@@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes(
* TODO: expand this to other ASICs
*/
if ((adev->asic_type >= CHIP_POLARIS10) &&
- (adev->asic_type <= CHIP_VEGAM) &&
- !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
- (void *)wm_with_clock_ranges))
- return true;
+ (adev->asic_type <= CHIP_VEGAM) &&
+ !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
+ (void *)wm_with_clock_ranges))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index d647f68fd563..08ce3bb8f640 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -24,6 +24,7 @@
*/
#include "amdgpu_dm_psr.h"
+#include "dc_dmub_srv.h"
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
@@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
- return true;
+ return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}
/*
@@ -165,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
if (vsync_rate_hz != 0) {
unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+
num_frames_static = (30000 / frame_time_microsec) + 1;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
new file mode 100644
index 000000000000..32d3086c4cb7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_replay.h"
+#include "dc.h"
+#include "dm_helpers.h"
+#include "amdgpu_dm.h"
+#include "modules/power/power_helpers.h"
+#include "dmub/inc/dmub_cmd.h"
+#include "dc/inc/link.h"
+
+/*
+ * link_supports_replay() - check if the link supports replay
+ * @link: link
+ * @aconnector: aconnector
+ *
+ */
+static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+{
+ struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state);
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+ struct adaptive_sync_caps *as_caps = &link->dpcd_caps.adaptive_sync_caps;
+
+ if (!state->freesync_capable)
+ return false;
+
+ if (!aconnector->vsdb_info.replay_mode)
+ return false;
+
+ // Check the eDP version
+ if (dpcd_caps->edp_rev < EDP_REVISION_13)
+ return false;
+
+ if (!dpcd_caps->alpm_caps.bits.AUX_WAKE_ALPM_CAP)
+ return false;
+
+ // Check adaptive sync support cap
+ if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT)
+ return false;
+
+ return true;
+}
+
+/*
+ * amdgpu_dm_setup_replay() - setup replay configuration
+ * @link: link
+ * @aconnector: aconnector
+ *
+ */
+bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+{
+ struct replay_config pr_config;
+ union replay_debug_flags *debug_flags = NULL;
+
+ // For eDP, if Replay is supported, return true to skip checks
+ if (link->replay_settings.config.replay_supported)
+ return true;
+
+ if (!dc_is_embedded_signal(link->connector_signal))
+ return false;
+
+ if (link->panel_config.psr.disallow_replay)
+ return false;
+
+ if (!link_supports_replay(link, aconnector))
+ return false;
+
+ // Mark Replay is supported in link and update related attributes
+ pr_config.replay_supported = true;
+ pr_config.replay_power_opt_supported = 0;
+ pr_config.replay_enable_option |= pr_enable_option_static_screen;
+ pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? true : false;
+
+ if (!pr_config.replay_timing_sync_supported)
+ pr_config.replay_enable_option &= ~pr_enable_option_general_ui;
+
+ debug_flags = (union replay_debug_flags *)&pr_config.debug_flags;
+ debug_flags->u32All = 0;
+ debug_flags->bitfields.visual_confirm =
+ link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? true : false;
+
+ link->replay_settings.replay_feature_enabled = true;
+
+ init_replay_config(link, &pr_config);
+
+ return true;
+}
+
+
+/*
+ * amdgpu_dm_replay_enable() - enable replay f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
+{
+ uint64_t state;
+ unsigned int retry_count;
+ bool replay_active = true;
+ const unsigned int max_retry = 1000;
+ bool force_static = true;
+ struct dc_link *link = NULL;
+
+
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+
+ if (link == NULL)
+ return false;
+
+ link->dc->link_srv->edp_setup_replay(link, stream);
+
+ link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL);
+
+ link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL);
+
+ if (wait == true) {
+
+ for (retry_count = 0; retry_count <= max_retry; retry_count++) {
+ dc_link_get_replay_state(link, &state);
+ if (replay_active) {
+ if (state != REPLAY_STATE_0 &&
+ (!force_static || state == REPLAY_STATE_3))
+ break;
+ } else {
+ if (state == REPLAY_STATE_0)
+ break;
+ }
+ udelay(500);
+ }
+
+ /* assert if max retry hit */
+ if (retry_count >= max_retry)
+ ASSERT(0);
+ } else {
+ /* To-do: Add trace log */
+ }
+
+ return true;
+}
+
+/*
+ * amdgpu_dm_replay_disable() - disable replay f/w
+ * @stream: stream state
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_replay_disable(struct dc_stream_state *stream)
+{
+
+ if (stream->link) {
+ DRM_DEBUG_DRIVER("Disabling replay...\n");
+ stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
new file mode 100644
index 000000000000..01cba3cd6246
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_REPLAY_H_
+#define AMDGPU_DM_AMDGPU_DM_REPLAY_H_
+
+#include "amdgpu.h"
+
+enum replay_enable_option {
+ pr_enable_option_static_screen = 0x1,
+ pr_enable_option_mpo_video = 0x2,
+ pr_enable_option_full_screen_video = 0x4,
+ pr_enable_option_general_ui = 0x8,
+ pr_enable_option_static_screen_coasting = 0x10000,
+ pr_enable_option_mpo_video_coasting = 0x20000,
+ pr_enable_option_full_screen_video_coasting = 0x40000,
+};
+
+
+bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable);
+bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_replay_disable(struct dc_stream_state *stream);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index c42aa947c969..172aa10a8800 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -33,6 +33,8 @@
#include <asm/cputable.h>
#elif defined(CONFIG_ARM64)
#include <asm/neon.h>
+#elif defined(CONFIG_LOONGARCH)
+#include <asm/fpu.h>
#endif
/**
@@ -88,7 +90,7 @@ void dc_fpu_begin(const char *function_name, const int line)
*pcpu += 1;
if (*pcpu == 1) {
-#if defined(CONFIG_X86)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
migrate_disable();
kernel_fpu_begin();
#elif defined(CONFIG_PPC64)
@@ -128,7 +130,7 @@ void dc_fpu_end(const char *function_name, const int line)
pcpu = get_cpu_ptr(&fpu_recursion_depth);
*pcpu -= 1;
if (*pcpu <= 0) {
-#if defined(CONFIG_X86)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
kernel_fpu_end();
migrate_enable();
#elif defined(CONFIG_PPC64)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 352e9afb85c6..e295a839ab47 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -24,7 +24,7 @@
*/
#include "dm_services.h"
-#include "conversion.h"
+#include "basics/conversion.h"
#define DIVIDER 10000
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 84aeccf36b4b..6d2924114a3e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -50,12 +50,11 @@ bool dal_vector_construct(
return true;
}
-static bool dal_vector_presized_costruct(
- struct vector *vector,
- struct dc_context *ctx,
- uint32_t count,
- void *initial_value,
- uint32_t struct_size)
+static bool dal_vector_presized_costruct(struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t count,
+ void *initial_value,
+ uint32_t struct_size)
{
uint32_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 27af9d3c2b73..6b3190447581 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -96,7 +96,7 @@ struct dc_bios *bios_parser_create(
struct bp_init_data *init,
enum dce_version dce_version)
{
- struct bios_parser *bp = NULL;
+ struct bios_parser *bp;
bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
if (!bp)
@@ -2576,7 +2576,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
struct dc_bios *dcb)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- struct integrated_info *info = NULL;
+ struct integrated_info *info;
info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
@@ -2593,11 +2593,10 @@ static struct integrated_info *bios_parser_create_integrated_info(
return NULL;
}
-static enum bp_result update_slot_layout_info(
- struct dc_bios *dcb,
- unsigned int i,
- struct slot_layout_info *slot_layout_info,
- unsigned int record_offset)
+static enum bp_result update_slot_layout_info(struct dc_bios *dcb,
+ unsigned int i,
+ struct slot_layout_info *slot_layout_info,
+ unsigned int record_offset)
{
unsigned int j;
struct bios_parser *bp;
@@ -2696,10 +2695,9 @@ static enum bp_result update_slot_layout_info(
}
-static enum bp_result get_bracket_layout_record(
- struct dc_bios *dcb,
- unsigned int bracket_layout_id,
- struct slot_layout_info *slot_layout_info)
+static enum bp_result get_bracket_layout_record(struct dc_bios *dcb,
+ unsigned int bracket_layout_id,
+ struct slot_layout_info *slot_layout_info)
{
unsigned int i;
unsigned int record_offset;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index cce47d3f1a13..484d62bcf2c2 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -340,9 +340,8 @@ static struct atom_display_object_path_v2 *get_bios_object(
}
/* from graphics_object_id, find display path which includes the object_id */
-static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(
- struct bios_parser *bp,
- struct graphics_object_id id)
+static struct atom_display_object_path_v3 *get_bios_object_from_path_v3(struct bios_parser *bp,
+ struct graphics_object_id id)
{
unsigned int i;
struct graphics_object_id obj_id = {0};
@@ -521,9 +520,8 @@ static enum bp_result get_gpio_i2c_info(
return BP_RESULT_OK;
}
-static struct atom_hpd_int_record *get_hpd_record_for_path_v3(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_hpd_int_record *get_hpd_record_for_path_v3(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -774,20 +772,20 @@ static enum bp_result bios_parser_get_device_tag(
return BP_RESULT_BADINPUT;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
+ case 4:
+ default:
/* getBiosObject will return MXM object */
- object = get_bios_object(bp, connector_object_id);
+ object = get_bios_object(bp, connector_object_id);
if (!object) {
BREAK_TO_DEBUGGER(); /* Invalid object id */
return BP_RESULT_BADINPUT;
}
- info->acpi_device = 0; /* BIOS no longer provides this */
- info->dev_id = device_type_from_device_id(object->device_tag);
- break;
- case 5:
+ info->acpi_device = 0; /* BIOS no longer provides this */
+ info->dev_id = device_type_from_device_id(object->device_tag);
+ break;
+ case 5:
object_path_v3 = get_bios_object_from_path_v3(bp, connector_object_id);
if (!object_path_v3) {
@@ -1582,13 +1580,13 @@ static bool bios_parser_is_device_id_supported(
uint32_t mask = get_support_mask_for_device_id(id);
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
- return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0;
- break;
- case 5:
- return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
- break;
+ case 4:
+ default:
+ return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) & mask) != 0;
+ break;
+ case 5:
+ return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0;
+ break;
}
return false;
@@ -1757,7 +1755,7 @@ static enum bp_result bios_parser_get_firmware_info(
case 2:
case 3:
result = get_firmware_info_v3_2(bp, info);
- break;
+ break;
case 4:
result = get_firmware_info_v3_4(bp, info);
break;
@@ -2175,9 +2173,8 @@ static struct atom_disp_connector_caps_record *get_disp_connector_caps_record(
return NULL;
}
-static struct atom_connector_caps_record *get_connector_caps_record(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_connector_caps_record *get_connector_caps_record(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -2228,7 +2225,7 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
return BP_RESULT_BADINPUT;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
+ case 4:
default:
object = get_bios_object(bp, object_id);
@@ -2264,9 +2261,8 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
return BP_RESULT_OK;
}
-static struct atom_connector_speed_record *get_connector_speed_cap_record(
- struct bios_parser *bp,
- struct atom_display_object_path_v3 *object)
+static struct atom_connector_speed_record *get_connector_speed_cap_record(struct bios_parser *bp,
+ struct atom_display_object_path_v3 *object)
{
struct atom_common_record_header *header;
uint32_t offset;
@@ -3090,7 +3086,7 @@ static struct integrated_info *bios_parser_create_integrated_info(
struct dc_bios *dcb)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- struct integrated_info *info = NULL;
+ struct integrated_info *info;
info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
@@ -3679,7 +3675,7 @@ struct dc_bios *firmware_parser_create(
struct bp_init_data *init,
enum dce_version dce_version)
{
- struct bios_parser *bp = NULL;
+ struct bios_parser *bp;
bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
if (!bp)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 1ef9e4053bb7..90a02d7bd3da 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -123,9 +123,7 @@ static void encoder_control_dmcub(
sizeof(cmd.digx_encoder_control.header);
cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result encoder_control_digx_v1_5(
@@ -261,9 +259,7 @@ static void transmitter_control_dmcub(
sizeof(cmd.dig1_transmitter_control.header);
cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result transmitter_control_v1_6(
@@ -325,9 +321,7 @@ static void transmitter_control_dmcub_v1_7(
sizeof(cmd.dig1_transmitter_control.header);
cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result transmitter_control_v1_7(
@@ -435,9 +429,7 @@ static void set_pixel_clock_dmcub(
sizeof(cmd.set_pixel_clock.header);
cmd.set_pixel_clock.pixel_clock.clk = *clk;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result set_pixel_clock_v7(
@@ -804,9 +796,7 @@ static void enable_disp_power_gating_dmcub(
sizeof(cmd.enable_disp_power_gating.header);
cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result enable_disp_power_gating_v2_1(
@@ -1016,10 +1006,7 @@ static void enable_lvtma_control_dmcub(
panel_instance;
cmd.lvtma_control.data.bypass_panel_control_wait =
bypass_panel_control_wait;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
-
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result enable_lvtma_control(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 6127d6045336..dcedf9645161 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -117,6 +117,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
dc->link_srv->edp_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
+ dc->link_srv->edp_set_replay_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -137,6 +138,8 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
continue;
dc->link_srv->edp_set_psr_allow_active(edp_link,
&clk_mgr->psr_allow_active_cache, false, false, NULL);
+ dc->link_srv->edp_set_replay_allow_active(edp_link,
+ &clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 934e6423dc1a..1f36ad8a7de4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -111,12 +111,10 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
bp->funcs->set_dce_clock(bp, &dce_clk_params);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
- if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_clock / 1000 / 7);
- }
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
}
clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
@@ -153,12 +151,10 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
- if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_clock / 1000 / 7);
- }
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
}
clk_mgr->dfs_bypass_disp_clk = actual_clock;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 450eaead4f20..89b79dd39628 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -135,12 +135,10 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
- if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_dispclk_set_mhz / 7);
- }
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_dispclk_set_mhz / 7);
}
return actual_dispclk_set_mhz * 1000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 650f3b4b562e..c435f7632e8e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -531,6 +531,11 @@ void dcn20_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
+ int dprefclk_did;
+ int target_div;
+ uint32_t pll_req_reg;
+ struct fixed31_32 pll_req;
+
clk_mgr->base.ctx = ctx;
clk_mgr->pp_smu = pp_smu;
clk_mgr->base.funcs = &dcn2_funcs;
@@ -547,42 +552,34 @@ void dcn20_clk_mgr_construct(
clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- dcn2_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->base.dentist_vco_freq_khz = 3850000;
+ /* DFS Slice 2 should be used for DPREFCLK */
+ dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL);
+ /* Convert DPREFCLK DFS Slice DID to actual divider */
+ target_div = dentist_get_divider_from_did(dprefclk_did);
+ /* get FbMult value */
+ pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ);
- } else {
- /* DFS Slice 2 should be used for DPREFCLK */
- int dprefclk_did = REG_READ(CLK3_CLK2_DFS_CNTL);
- /* Convert DPREFCLK DFS Slice DID to actual divider*/
- int target_div = dentist_get_divider_from_did(dprefclk_did);
-
- /* get FbMult value */
- uint32_t pll_req_reg = REG_READ(CLK3_CLK_PLL_REQ);
- struct fixed31_32 pll_req;
-
- /* set up a fixed-point number
- * this works because the int part is on the right edge of the register
- * and the frac part is on the left edge
- */
+ /* set up a fixed-point number
+ * this works because the int part is on the right edge of the register
+ * and the frac part is on the left edge
+ */
- pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
- pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
+ pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
+ pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
- /* multiply by REFCLK period */
- pll_req = dc_fixpt_mul_int(pll_req, 100000);
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, 100000);
- /* integer part is now VCO frequency in kHz */
- clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
+ /* integer part is now VCO frequency in kHz */
+ clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
- /* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dentist_vco_freq_khz == 0)
- clk_mgr->base.dentist_vco_freq_khz = 3850000;
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
- /* Calculate the DPREFCLK in kHz.*/
- clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
- }
+ /* Calculate the DPREFCLK in kHz.*/
+ clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
//Integrated_info table does not exist on dGPU projects so should not be referenced
//anywhere in code for dGPUs.
//Also there is no plan for now that DFS BYPASS will be used on NV10/12/14.
@@ -590,4 +587,3 @@ void dcn20_clk_mgr_construct(
dce_clock_read_ss_info(clk_mgr);
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 811720749faf..694fe4271b4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -190,23 +190,17 @@ void dcn201_clk_mgr_construct(struct dc_context *ctx,
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- dcn201_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->base.dprefclk_khz = 600000;
- clk_mgr->base.dentist_vco_freq_khz = 3000000;
- } else {
- clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT);
- clk_mgr->base.dprefclk_khz *= 100;
+ clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT);
+ clk_mgr->base.dprefclk_khz *= 100;
- if (clk_mgr->base.dprefclk_khz == 0)
- clk_mgr->base.dprefclk_khz = 600000;
+ if (clk_mgr->base.dprefclk_khz == 0)
+ clk_mgr->base.dprefclk_khz = 600000;
- REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz);
- clk_mgr->base.dentist_vco_freq_khz *= 100000;
+ REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz);
+ clk_mgr->base.dentist_vco_freq_khz *= 100000;
- if (clk_mgr->base.dentist_vco_freq_khz == 0)
- clk_mgr->base.dentist_vco_freq_khz = 3000000;
- }
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3000000;
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index bd9fd0b54f46..0c6a4ab72b1d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -705,6 +705,7 @@ void rn_clk_mgr_construct(
struct dpm_clocks clock_table = { 0 };
enum pp_smu_status status = 0;
int is_green_sardine = 0;
+ struct clk_log_info log_info = {0};
#if defined(CONFIG_DRM_AMD_DC_FP)
is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
@@ -725,48 +726,41 @@ void rn_clk_mgr_construct(
clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
+ clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);
+
+ /* SMU Version 55.51.0 and up no longer have an issue
+ * that needs to limit minimum dispclk */
+ if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
+ debug->min_disp_clk_khz = 0;
+
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3600000;
- } else {
- struct clk_log_info log_info = {0};
-
- clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);
-
- /* SMU Version 55.51.0 and up no longer have an issue
- * that needs to limit minimum dispclk */
- if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
- debug->min_disp_clk_khz = 0;
-
- /* TODO: Check we get what we expect during bringup */
- clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
-
- /* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dentist_vco_freq_khz == 0)
- clk_mgr->base.dentist_vco_freq_khz = 3600000;
-
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
- if (clk_mgr->periodic_retraining_disabled) {
- rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
- } else {
- if (is_green_sardine)
- rn_bw_params.wm_table = lpddr4_wm_table_gs;
- else
- rn_bw_params.wm_table = lpddr4_wm_table_rn;
- }
+
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
+ if (clk_mgr->periodic_retraining_disabled) {
+ rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt;
} else {
if (is_green_sardine)
- rn_bw_params.wm_table = ddr4_wm_table_gs;
- else {
- if (ctx->dc->config.is_single_rank_dimm)
- rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
- else
- rn_bw_params.wm_table = ddr4_wm_table_rn;
- }
+ rn_bw_params.wm_table = lpddr4_wm_table_gs;
+ else
+ rn_bw_params.wm_table = lpddr4_wm_table_rn;
+ }
+ } else {
+ if (is_green_sardine)
+ rn_bw_params.wm_table = ddr4_wm_table_gs;
+ else {
+ if (ctx->dc->config.is_single_rank_dimm)
+ rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
}
- /* Saved clocks configured at boot for debug purposes */
- rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
}
+ /* Saved clocks configured at boot for debug purposes */
+ rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
clk_mgr->base.dprefclk_khz = 600000;
dce_clock_read_ss_info(clk_mgr);
@@ -786,9 +780,8 @@ void rn_clk_mgr_construct(
}
}
- if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
- /* enable powerfeatures when displaycount goes to 0 */
+ /* enable powerfeatures when displaycount goes to 0 */
+ if (clk_mgr->smu_ver >= 0x00371500)
rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 27fbe906682f..8c9d45e5b13b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -147,17 +147,14 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
VBIOSSMC_MSG_SetDispclkFreq,
khz_to_mhz_ceil(requested_dispclk_khz));
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
- if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_dispclk_set_mhz / 7);
- }
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_dispclk_set_mhz / 7);
}
// pmfw always set clock more than or equal requested clock
- if (!IS_DIAG_DC(dc->ctx->dce_environment))
- ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz));
+ ASSERT(actual_dispclk_set_mhz >= khz_to_mhz_ceil(requested_dispclk_khz));
return actual_dispclk_set_mhz * 1000;
}
@@ -221,15 +218,13 @@ void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phy
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
int actual_dppclk_set_mhz = -1;
- struct dc *dc = clk_mgr->base.ctx->dc;
actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
khz_to_mhz_ceil(requested_dpp_khz));
- if (!IS_DIAG_DC(dc->ctx->dce_environment))
- ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz));
+ ASSERT(actual_dppclk_set_mhz >= khz_to_mhz_ceil(requested_dpp_khz));
return actual_dppclk_set_mhz * 1000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 694a9d3d92ae..3271c8c7905d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -206,7 +206,6 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false;
bool update_uclk = false;
bool p_state_change_support;
- int total_plane_count;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
@@ -247,8 +246,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ p_state_change_support = new_clocks->p_state_change_support;
// invalidate the current P-State forced min in certain dc_mode_softmax situations
if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
@@ -523,6 +521,8 @@ void dcn3_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
+ struct clk_state_registers_and_bypass s = { 0 };
+
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn3_funcs;
clk_mgr->regs = &clk_mgr_regs;
@@ -539,27 +539,19 @@ void dcn3_clk_mgr_construct(
clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- clk_mgr->base.funcs = &dcn3_fpga_funcs;
- clk_mgr->base.dentist_vco_freq_khz = 3650000;
-
- } else {
- struct clk_state_registers_and_bypass s = { 0 };
+ /* integer part is now VCO frequency in kHz */
+ clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);
- /* integer part is now VCO frequency in kHz */
- clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);
-
- /* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dentist_vco_freq_khz == 0)
- clk_mgr->base.dentist_vco_freq_khz = 3650000;
- /* Convert dprefclk units from MHz to KHz */
- /* Value already divided by 10, some resolution lost */
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3650000;
+ /* Convert dprefclk units from MHz to KHz */
+ /* Value already divided by 10, some resolution lost */
- /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
- //ASSERT(s.dprefclk != 0);
- if (s.dprefclk != 0)
- clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
- }
+ /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
+ //ASSERT(s.dprefclk != 0);
+ if (s.dprefclk != 0)
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
clk_mgr->dfs_bypass_enabled = false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 1fbf1c105dc1..bdbf18306698 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -312,6 +312,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b
/* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */
uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0);
+ smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n",
+ enable, cache_timer_delay, cache_timer_scale);
+
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 01383aac6b41..a5489fe6875f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -117,7 +117,7 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = vg_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
- if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
@@ -151,10 +151,8 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- }
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -664,6 +662,7 @@ void vg_clk_mgr_construct(
struct dccg *dccg)
{
struct smu_dpm_clks smu_dpm_clks = { 0 };
+ struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &vg_funcs;
@@ -703,32 +702,25 @@ void vg_clk_mgr_construct(
ASSERT(smu_dpm_clks.dpm_clks);
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- vg_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
- } else {
- struct clk_log_info log_info = {0};
+ clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);
- clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
- if (clk_mgr->base.smu_ver)
- clk_mgr->base.smu_present = true;
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
- /* TODO: Check we get what we expect during bringup */
- clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
-
- /* in case we don't get a value from the register, use default */
- if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
- clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
- vg_bw_params.wm_table = lpddr5_wm_table;
- } else {
- vg_bw_params.wm_table = ddr4_wm_table;
- }
- /* Saved clocks configured at boot for debug purposes */
- vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ vg_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ vg_bw_params.wm_table = ddr4_wm_table;
}
+ /* Saved clocks configured at boot for debug purposes */
+ vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
@@ -746,12 +738,6 @@ void vg_clk_mgr_construct(
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
smu_dpm_clks.dpm_clks);
-/*
- if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) {
- enable powerfeatures when displaycount goes to 0
- dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
- }
-*/
}
void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index f9e2e0c3095e..3db4ef564b99 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
+
+ /* Checking stream / link detection ensuring that PHY is active*/
+ if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+ display_count++;
+
}
for (i = 0; i < dc->link_count; i++) {
@@ -205,10 +210,8 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- }
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -250,9 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -674,6 +675,7 @@ void dcn31_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn31_funcs;
@@ -713,29 +715,22 @@ void dcn31_clk_mgr_construct(
ASSERT(smu_dpm_clks.dpm_clks);
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
- } else {
- struct clk_log_info log_info = {0};
-
- clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base);
+ clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base);
- if (clk_mgr->base.smu_ver)
- clk_mgr->base.smu_present = true;
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
- /* TODO: Check we get what we expect during bringup */
- clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
-
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
- dcn31_bw_params.wm_table = lpddr5_wm_table;
- } else {
- dcn31_bw_params.wm_table = ddr5_wm_table;
- }
- /* Saved clocks configured at boot for debug purposes */
- dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
- &clk_mgr->base.base, &log_info);
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ dcn31_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ dcn31_bw_params.wm_table = ddr5_wm_table;
}
+ /* Saved clocks configured at boot for debug purposes */
+ dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 0827c7df2855..32279c5db724 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -130,7 +130,7 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
if (result == VBIOSSMC_Result_Failed) {
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
- DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ DC_LOG_DEBUG("Watermarks table not configured properly by SMU");
else
ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 5cb44f838bde..7326b7565846 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -241,10 +241,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- }
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -286,9 +284,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -405,32 +401,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 16.5,
- .sr_enter_plus_exit_time_us = 18.5,
+ .sr_exit_time_us = 30.0,
+ .sr_enter_plus_exit_time_us = 32.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 16.5,
- .sr_enter_plus_exit_time_us = 18.5,
+ .sr_exit_time_us = 30.0,
+ .sr_enter_plus_exit_time_us = 32.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 16.5,
- .sr_enter_plus_exit_time_us = 18.5,
+ .sr_exit_time_us = 30.0,
+ .sr_enter_plus_exit_time_us = 32.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 16.5,
- .sr_enter_plus_exit_time_us = 18.5,
+ .sr_exit_time_us = 30.0,
+ .sr_enter_plus_exit_time_us = 32.0,
.valid = true,
},
}
@@ -726,6 +722,7 @@ void dcn314_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn314_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn314_funcs;
@@ -765,35 +762,27 @@ void dcn314_clk_mgr_construct(
ASSERT(smu_dpm_clks.dpm_clks);
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
- } else {
- struct clk_log_info log_info = {0};
-
- clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base);
+ clk_mgr->base.smu_ver = dcn314_smu_get_smu_version(&clk_mgr->base);
- if (clk_mgr->base.smu_ver)
- clk_mgr->base.smu_present = true;
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
- /* TODO: Check we get what we expect during bringup */
- clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
- dcn314_bw_params.wm_table = lpddr5_wm_table;
- else
- dcn314_bw_params.wm_table = ddr5_wm_table;
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
+ dcn314_bw_params.wm_table = lpddr5_wm_table;
+ else
+ dcn314_bw_params.wm_table = ddr5_wm_table;
- /* Saved clocks configured at boot for debug purposes */
- dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
- &clk_mgr->base.base, &log_info);
-
- }
+ /* Saved clocks configured at boot for debug purposes */
+ dcn314_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
- //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz);
clk_mgr->base.base.bw_params = &dcn314_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 0765334f0825..07baa10a8647 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -145,7 +145,7 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
if (result == VBIOSSMC_Result_Failed) {
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
- DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ DC_LOG_DEBUG("Watermarks table not configured properly by SMU");
else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index b737cbc468f5..b2c4f97afc8b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -184,12 +184,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
- new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
- if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
- new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
- }
+ if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+ if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -234,9 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
@@ -602,6 +598,7 @@ void dcn315_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn315_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn315_funcs;
@@ -641,26 +638,19 @@ void dcn315_clk_mgr_construct(
ASSERT(smu_dpm_clks.dpm_clks);
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
- } else {
- struct clk_log_info log_info = {0};
-
- clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base);
+ clk_mgr->base.smu_ver = dcn315_smu_get_smu_version(&clk_mgr->base);
- if (clk_mgr->base.smu_ver > 0)
- clk_mgr->base.smu_present = true;
-
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
- dcn315_bw_params.wm_table = lpddr5_wm_table;
- } else {
- dcn315_bw_params.wm_table = ddr5_wm_table;
- }
- /* Saved clocks configured at boot for debug purposes */
- dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
- &clk_mgr->base.base, &log_info);
+ if (clk_mgr->base.smu_ver > 0)
+ clk_mgr->base.smu_present = true;
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ dcn315_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ dcn315_bw_params.wm_table = ddr5_wm_table;
}
+ /* Saved clocks configured at boot for debug purposes */
+ dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 925d6e13620e..3e0da873cf4c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -33,28 +33,26 @@
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
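
For reference, the IP_BASE tables above are consumed through a REG() macro like the one kept later in this file: a register's MMIO offset is the instance's segment base plus the register's own offset, with the _BASE_IDX constant selecting the segment. The standalone sketch below collapses the table to a single instance and uses an invented register name and offset.

#include <stdio.h>

#define MAX_SEGMENT 6

struct ip_base_instance { unsigned int segment[MAX_SEGMENT]; };

static const struct ip_base_instance mp0 = {
	{ 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 }
};

/* Hypothetical register: offset 0x0040 inside segment 1. */
#define regFAKE_RSMU_INDEX		0x0040
#define regFAKE_RSMU_INDEX_BASE_IDX	1

#define REG(reg_name) \
	(mp0.segment[reg##reg_name##_BASE_IDX] + reg##reg_name)

int main(void)
{
	printf("FAKE_RSMU_INDEX lives at 0x%08x\n", REG(FAKE_RSMU_INDEX));
	return 0;
}
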
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 93db4dbee713..09151cc56ce4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -45,24 +45,14 @@
#define MAX_INSTANCE 7
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
-static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0 } },
- { { 0x00016E00, 0x02401C00, 0, 0, 0, 0 } },
- { { 0x00017000, 0x02402000, 0, 0, 0, 0 } },
- { { 0x00017200, 0x02402400, 0, 0, 0, 0 } },
- { { 0x0001B000, 0x0242D800, 0, 0, 0, 0 } },
- { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0 } },
- { { 0x0001B400, 0x0242E000, 0, 0, 0, 0 } } } };
-
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -73,9 +63,6 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
-#define REG(reg_name) \
- (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-
#define TO_CLK_MGR_DCN316(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn316, base)
@@ -207,12 +194,10 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
- }
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+ if (new_clocks->dispclk_khz < 100000)
+ new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -254,9 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
@@ -581,36 +564,6 @@ static struct clk_mgr_funcs dcn316_funcs = {
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
-static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
-{
- /* get FbMult value */
- struct fixed31_32 pll_req;
- unsigned int fbmult_frac_val = 0;
- unsigned int fbmult_int_val = 0;
-
- /*
- * Register value of fbmult is in 8.16 format, we are converting to 31.32
- * to leverage the fix point operations available in driver
- */
-
- REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
- REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
-
- pll_req = dc_fixpt_from_int(fbmult_int_val);
-
- /*
- * since fractional part is only 16 bit in register definition but is 32 bit
- * in our fix point definiton, need to shift left by 16 to obtain correct value
- */
- pll_req.value |= fbmult_frac_val << 16;
-
- /* multiply by REFCLK period */
- pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
-
- /* integer part is now VCO frequency in kHz */
- return dc_fixpt_floor(pll_req);
-}
-
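
The helper removed here converted the PLL feedback multiplier from its 8.16 register format into a VCO frequency. A small standalone reproduction of that arithmetic is shown below; the multiplier value and the 48 MHz reference clock are invented example numbers, not values read from hardware.

#include <stdint.h>
#include <stdio.h>

static unsigned int vco_khz_from_fbmult(unsigned int fb_int, unsigned int fb_frac,
					unsigned int ref_khz)
{
	/* Promote 8.16 to a 32.32 fixed-point value so the 16-bit fraction
	 * sits in the top half of the fractional part, then scale by the
	 * reference clock; the integer part of the product is the VCO in kHz. */
	uint64_t fixed = ((uint64_t)fb_int << 32) | ((uint64_t)fb_frac << 16);

	return (unsigned int)((fixed * ref_khz) >> 32);	/* floor of the product */
}

int main(void)
{
	/* e.g. FbMult = 64.5 with a 48 MHz reference -> 3,096,000 kHz VCO */
	printf("%u kHz\n", vco_khz_from_fbmult(64, 0x8000, 48000));
	return 0;
}
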
void dcn316_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn316 *clk_mgr,
@@ -618,6 +571,7 @@ void dcn316_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn316_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn316_funcs;
@@ -657,35 +611,28 @@ void dcn316_clk_mgr_construct(
ASSERT(smu_dpm_clks.dpm_clks);
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
- clk_mgr->base.base.dentist_vco_freq_khz = 2500000;
- } else {
- struct clk_log_info log_info = {0};
-
- clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base);
-
- if (clk_mgr->base.smu_ver > 0)
- clk_mgr->base.smu_present = true;
+ clk_mgr->base.smu_ver = dcn316_smu_get_smu_version(&clk_mgr->base);
- // Skip this for now as it did not work on DCN315, renable during bring up
- clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ if (clk_mgr->base.smu_ver > 0)
+ clk_mgr->base.smu_present = true;
- /* in case we don't get a value from the register, use default */
- if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
- clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */
+	// Skip this for now as it did not work on DCN315, re-enable during bring-up
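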
+ //clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+ clk_mgr->base.base.dentist_vco_freq_khz = 2500000;
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
+		clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2500 MHz */
- if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
- dcn316_bw_params.wm_table = lpddr5_wm_table;
- } else {
- dcn316_bw_params.wm_table = ddr4_wm_table;
- }
- /* Saved clocks configured at boot for debug purposes */
- dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
- &clk_mgr->base.base, &log_info);
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ dcn316_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ dcn316_bw_params.wm_table = ddr4_wm_table;
}
+ /* Saved clocks configured at boot for debug purposes */
+ dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
+ &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
index 457a9254ae1c..3ed19197a755 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
@@ -34,23 +34,21 @@
#define MAX_INSTANCE 7
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0, 0 } } } };
#define REG(reg_name) \
(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 8d9444db092a..984b52923534 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -182,23 +182,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
/* DTBCLK */
- if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
+ if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch) {
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz =
+ dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+ }
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
num_levels = num_entries_per_clk->num_dispclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+ //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
@@ -233,6 +242,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
DC_FP_END();
}
+static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context,
+ int ref_dtbclk_khz)
+{
+ struct dccg *dccg = clk_mgr->dccg;
+ uint32_t tg_mask = 0;
+ int i;
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dtbclk_dto_params dto_params = {0};
+
+ /* use mask to program DTO once per tg */
+ if (pipe_ctx->stream_res.tg &&
+ !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
+ tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
+
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
+
+ dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ }
+ }
+}
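
The new helper above programs the DTB DTO only once per timing generator: several pipes can share one TG (for example under ODM), so a bitmask records which TG instances have already been handled. The standalone model below uses fabricated pipe/TG data to show the selection.

#include <stdint.h>
#include <stdio.h>

#define NUM_PIPES 4

struct fake_pipe { int has_tg; int tg_inst; };

int main(void)
{
	/* pipes 0 and 1 share TG 0 (ODM pair), pipe 2 drives TG 1, pipe 3 idle */
	struct fake_pipe pipes[NUM_PIPES] = {
		{ 1, 0 }, { 1, 0 }, { 1, 1 }, { 0, 0 },
	};
	uint32_t tg_mask = 0;
	int i;

	for (i = 0; i < NUM_PIPES; i++) {
		if (!pipes[i].has_tg || (tg_mask & (1u << pipes[i].tg_inst)))
			continue;	/* TG already programmed via another pipe */
		tg_mask |= 1u << pipes[i].tg_inst;
		printf("program DTB DTO on TG %d (found via pipe %d)\n",
		       pipes[i].tg_inst, i);
	}
	return 0;
}
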
+
/* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming),
* update DPPCLK to be the exact frequency that will be set after the DPPCLK
* divider is updated. This will prevent rounding issues that could cause DPP
@@ -262,7 +297,7 @@ void dcn32_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz, prev_dppclk_khz;
+ int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
@@ -433,10 +468,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
bool update_uclk = false, update_fclk = false;
bool p_state_change_support;
bool fclk_p_state_change_support;
- int total_plane_count;
-
- if (dc->work_arounds.skip_clock_update)
- return;
if (clk_mgr_base->clks.dispclk_khz == 0 ||
(dc->debug.force_clock_mode & 0x1)) {
@@ -462,10 +493,10 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support;
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+ if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
+ !dc->work_arounds.clock_update_disable_mask.fclk) {
clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
/* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
@@ -479,12 +510,14 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) &&
+ !dc->work_arounds.clock_update_disable_mask.dcfclk) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
}
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) &&
+ !dc->work_arounds.clock_update_disable_mask.dcfclk_ds) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
}
@@ -502,36 +535,53 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
-
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ p_state_change_support = new_clocks->p_state_change_support;
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) &&
+ !dc->work_arounds.clock_update_disable_mask.uclk) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
+ if (!clk_mgr_base->clks.p_state_change_support) {
+ if (dc->clk_mgr->dc_mode_softmax_enabled) {
+ /* On DCN32x we will never have the functional UCLK min above the softmax
+ * since we calculate mode support based on softmax being the max UCLK
+ * frequency.
+ */
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ } else {
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+ }
+ }
}
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+ dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, true);
+ else
+ dcn32_smu_wait_for_dmub_ack_mclk(clk_mgr, false);
+
/* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
update_fclk = true;
}
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk &&
+ !dc->work_arounds.clock_update_disable_mask.fclk) {
/* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
- if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz) &&
+ !dc->work_arounds.clock_update_disable_mask.uclk) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
update_uclk = true;
}
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
- (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
+ (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
+ !dc->work_arounds.clock_update_disable_mask.uclk)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -570,6 +620,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* DCCG requires KHz precision for DTBCLK */
clk_mgr_base->clks.ref_dtbclk_khz =
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+ dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
@@ -756,7 +807,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
+ clk_mgr_base->bw_params->max_memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -771,8 +822,7 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
if (!clk_mgr->smu_present)
return;
- dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->max_memclk_mhz);
}
/* Get current memclk states, update bounding box */
@@ -789,6 +839,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
&num_entries_per_clk->num_memclk_levels);
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_softmax_memclk = clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz;
/* memclk must have at least one level */
num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1;
@@ -796,13 +848,15 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
num_levels = num_entries_per_clk->num_memclk_levels;
} else {
num_levels = num_entries_per_clk->num_fclk_levels;
}
-
+ clk_mgr_base->bw_params->max_memclk_mhz =
+ clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
@@ -855,6 +909,25 @@ static bool dcn32_is_smu_present(struct clk_mgr *clk_mgr_base)
return clk_mgr->smu_present;
}
+static void dcn32_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
+
+static void dcn32_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz);
+}
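
The gating added throughout dcn32_update_clocks() earlier in this file follows one pattern: each clock domain consults a per-domain bit in dc->work_arounds.clock_update_disable_mask before applying its update, so one domain can be frozen while the others keep updating. The trimmed standalone model below (made-up mask layout and frequencies) shows the effect.

#include <stdbool.h>
#include <stdio.h>

struct clock_update_disable_mask { bool uclk, fclk, dcfclk, dcfclk_ds; };

/* Illustration of the usual should_set_clock() rule: always raise, only
 * lower when it is safe to do so. */
static bool should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
	return (new_khz > cur_khz) || (safe_to_lower && new_khz < cur_khz);
}

int main(void)
{
	struct clock_update_disable_mask wa = { .dcfclk = true };	/* dcfclk frozen */
	int cur_dcfclk = 400000, new_dcfclk = 600000;
	int cur_uclk = 800000, new_uclk = 1000000;

	if (should_set_clock(true, new_dcfclk, cur_dcfclk) && !wa.dcfclk)
		cur_dcfclk = new_dcfclk;	/* never reached: masked off */
	if (should_set_clock(true, new_uclk, cur_uclk) && !wa.uclk)
		cur_uclk = new_uclk;		/* still allowed */

	printf("dcfclk=%d kHz, uclk=%d kHz\n", cur_dcfclk, cur_uclk);
	return 0;
}
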
static struct clk_mgr_funcs dcn32_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
@@ -865,6 +938,8 @@ static struct clk_mgr_funcs dcn32_funcs = {
.notify_wm_ranges = dcn32_notify_wm_ranges,
.set_hard_min_memclk = dcn32_set_hard_min_memclk,
.set_hard_max_memclk = dcn32_set_hard_max_memclk,
+ .set_max_memclk = dcn32_set_max_memclk,
+ .set_min_memclk = dcn32_set_min_memclk,
.get_memclk_states_from_smu = dcn32_get_memclk_states_from_smu,
.are_clock_states_equal = dcn32_are_clock_states_equal,
.enable_pme_wa = dcn32_enable_pme_wa,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index fb524fe4ab26..700ce42036d7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -139,3 +139,10 @@ unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, ui
return response;
}
+
+void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ smu_print("PMFW to wait for DMCUB ack for MCLK : %d\n", enable);
+
+ dcn32_smu_send_msg_with_param(clk_mgr, 0x14, enable ? 1 : 0, NULL);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
index a68038a41972..a34c258c19dc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
@@ -43,5 +43,6 @@ void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
+void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable);
#endif /* __DCN32_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 52564b93f7eb..566d7045b2de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -515,8 +515,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
cmd.secure_display.roi_info.y_end = rect->y + rect->height;
}
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
+ dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
static inline void
@@ -587,18 +586,15 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
{
- int i;
struct pipe_ctx *pipe;
struct crc_params param;
struct timing_generator *tg;
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
- break;
- }
+ pipe = resource_get_otg_master_for_stream(
+ &dc->current_state->res_ctx, stream);
+
/* Stream not found */
- if (i == MAX_PIPES)
+ if (pipe == NULL)
return false;
/* By default, capture the full frame */
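
The lookup that replaces the open-coded loop above returns the stream's OTG master, i.e. the pipe with neither an MPC parent (top_pipe) nor an ODM predecessor (prev_odm_pipe), which is exactly the condition the deleted loop tested. A standalone sketch with a fabricated pipe table:

#include <stdio.h>

#define MAX_PIPES 4

struct fake_pipe { int stream_id; int has_top_pipe; int has_prev_odm_pipe; };

static int find_otg_master(const struct fake_pipe *pipes, int stream_id)
{
	int i;

	for (i = 0; i < MAX_PIPES; i++)
		if (pipes[i].stream_id == stream_id &&
		    !pipes[i].has_top_pipe && !pipes[i].has_prev_odm_pipe)
			return i;
	return -1;	/* stream not found */
}

int main(void)
{
	const struct fake_pipe pipes[MAX_PIPES] = {
		{ 1, 1, 0 },	/* MPC secondary for stream 1 */
		{ 1, 0, 0 },	/* OTG master for stream 1 */
		{ 1, 0, 1 },	/* ODM secondary for stream 1 */
		{ 0, 0, 0 },	/* unused */
	};

	printf("OTG master pipe: %d\n", find_otg_master(pipes, 1));
	return 0;
}
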
@@ -858,7 +854,6 @@ static bool dc_construct_ctx(struct dc *dc,
const struct dc_init_data *init_params)
{
struct dc_context *dc_ctx;
- enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
if (!dc_ctx)
@@ -876,8 +871,7 @@ static bool dc_construct_ctx(struct dc *dc,
/* Create logger */
- dc_version = resource_parse_asic_id(init_params->asic_id);
- dc_ctx->dce_version = dc_version;
+ dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
dc_ctx->perf_trace = dc_perf_trace_create();
if (!dc_ctx->perf_trace) {
@@ -1050,8 +1044,10 @@ static void disable_all_writeback_pipes_for_stream(
stream->writeback_info[i].wb_enabled = false;
}
-static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
- struct dc_stream_state *stream, bool lock)
+static void apply_ctx_interdependent_lock(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ bool lock)
{
int i;
@@ -1065,7 +1061,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *contex
// Copied conditions that were previously in dce110_apply_ctx_for_surface
if (stream == pipe_ctx->stream) {
- if (!pipe_ctx->top_pipe &&
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
}
@@ -1120,6 +1116,33 @@ static void phantom_pipe_blank(
hws->funcs.wait_for_blank_complete(opp);
}
+static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+ memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
+ get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+ get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
+ get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else {
+ if (dc->ctx->dce_version < DCN_VERSION_2_0)
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
+ get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
+ get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
+ get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -1190,6 +1213,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+ if (pipe->stream && pipe->plane_state)
+ dc_update_viusal_confirm_color(dc, context, pipe);
+
if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
@@ -1602,6 +1628,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
+ if (dc->debug.force_odm_combine)
+ return false;
+
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
@@ -1893,6 +1922,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_trigger_sync(dc, context);
+ /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
+ for (i = 0; i < context->stream_count; i++) {
+ uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
+
+ context->streams[i]->update_flags.raw = 0xFFFFFFFF;
+ context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
+ }
+
/* Program all planes within new context*/
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
@@ -1971,6 +2008,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
+ /* Clear update flags that were set earlier to avoid redundant programming */
+ for (i = 0; i < context->stream_count; i++) {
+ context->streams[i]->update_flags.raw = 0x0;
+ }
+
old_state = dc->current_state;
dc->current_state = context;
@@ -1981,6 +2023,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
+static bool commit_minimal_transition_state(struct dc *dc,
+ struct dc_state *transition_base_context);
+
/**
* dc_commit_streams - Commit current stream state
*
@@ -2002,6 +2047,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
struct dc_state *context;
enum dc_status res = DC_OK;
struct dc_validation_set set[MAX_STREAMS] = {0};
+ struct pipe_ctx *pipe;
+ bool handle_exit_odm2to1 = false;
if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
return res;
@@ -2026,6 +2073,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
+ /* Check for case where we are going from odm 2:1 to max
+ * pipe scenario. For these cases, we will call
+ * commit_minimal_transition_state() to exit out of odm 2:1
+ * first before processing new streams
+ */
+ if (stream_count == dc->res_pool->pipe_count) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->next_odm_pipe)
+ handle_exit_odm2to1 = true;
+ }
+ }
+
+ if (handle_exit_odm2to1)
+ res = commit_minimal_transition_state(dc, dc->current_state);
+
context = dc_create_state(dc);
if (!context)
goto context_alloc_fail;
@@ -2429,9 +2492,7 @@ static enum surface_update_type get_scaling_info_update_type(
if (!u->scaling_info)
return UPDATE_TYPE_FAST;
- if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
- || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
- || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
|| u->scaling_info->scaling_quality.integer_scaling !=
u->surface->scaling_quality.integer_scaling
@@ -2483,9 +2544,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (u->flip_addr)
- update_flags->bits.addr_update = 1;
-
if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
@@ -2544,15 +2602,19 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
elevate_update_type(&overall_type, type);
}
- if (update_flags->bits.input_csc_change
- || update_flags->bits.coeff_reduction_change
- || update_flags->bits.lut_3d
- || update_flags->bits.gamma_change
- || update_flags->bits.gamut_remap_change) {
+ if (update_flags->bits.lut_3d) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
+ if (dc->debug.enable_legacy_fast_update &&
+ (update_flags->bits.gamma_change ||
+ update_flags->bits.gamut_remap_change ||
+ update_flags->bits.input_csc_change ||
+ update_flags->bits.coeff_reduction_change)) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
return overall_type;
}
@@ -2585,7 +2647,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
- if (stream_update->out_transfer_func)
+ if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
@@ -2605,14 +2667,23 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
- if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
- su_flags->bits.crtc_timing_adjust = 1;
+
+ if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
+ (stream_update->vrr_infopacket || stream_update->allow_freesync ||
+ stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
+ su_flags->bits.fams_changed = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
+
+ /* Output transfer function changes do not require bandwidth recalculation,
+ * so don't trigger a full update
+ */
+ if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ su_flags->bits.out_tf = 1;
}
for (i = 0 ; i < surface_count; i++) {
@@ -2625,96 +2696,6 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
-static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
-{
- int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
-
- view_height = src.height;
- view_width = src.width;
-
- clip_x = clip_rect.x;
- clip_y = clip_rect.y;
-
- clip_width = clip_rect.width;
- clip_height = clip_rect.height;
-
- /* check for centered video accounting for off by 1 scaling truncation */
- if ((view_height - clip_y - clip_height <= clip_y + 1) &&
- (view_width - clip_x - clip_width <= clip_x + 1) &&
- (view_height - clip_y - clip_height >= clip_y - 1) &&
- (view_width - clip_x - clip_width >= clip_x - 1)) {
-
- /* when OS scales up/down to letter box, it may end up
- * with few blank pixels on the border due to truncating.
- * Add offset margin to account for this
- */
- if (clip_x <= 4 || clip_y <= 4)
- return true;
- }
-
- return false;
-}
-
-static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- enum surface_update_type update_type)
-{
- enum surface_update_type new_update_type = update_type;
- int i, j;
- struct pipe_ctx *pipe = NULL;
- struct dc_stream_state *stream;
-
- /* Check that we are in windowed MPO with ODM
- * - look for MPO pipe by scanning pipes for first pipe matching
- * surface that has moved ( position change )
- * - MPO pipe will have top pipe
- * - check that top pipe has ODM pointer
- */
- if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
- for (i = 0; i < surface_count; i++) {
- if (srf_updates[i].surface && srf_updates[i].scaling_info
- && srf_updates[i].surface->update_flags.bits.position_change) {
-
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[j];
- stream = pipe->stream;
- break;
- }
- }
-
- if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
- && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
- struct rect old_clip_rect, new_clip_rect;
- bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
- bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
-
- old_clip_rect = srf_updates[i].surface->clip_rect;
- new_clip_rect = srf_updates[i].scaling_info->clip_rect;
-
- old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
- old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
- old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
-
- new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
- new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
- new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
-
- if (old_clip_rect_left && new_clip_rect_middle)
- new_update_type = UPDATE_TYPE_FULL;
- else if (old_clip_rect_middle && new_clip_rect_right)
- new_update_type = UPDATE_TYPE_FULL;
- else if (old_clip_rect_right && new_clip_rect_middle)
- new_update_type = UPDATE_TYPE_FULL;
- else if (old_clip_rect_middle && new_clip_rect_left)
- new_update_type = UPDATE_TYPE_FULL;
- }
- }
- }
- }
- return new_update_type;
-}
-
/*
* dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
*
@@ -2746,10 +2727,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
- if (type == UPDATE_TYPE_MED)
- type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
- updates, surface_count, type);
-
if (type == UPDATE_TYPE_FAST) {
// If there's an available clock comparator, we use that.
if (dc->clk_mgr->funcs->are_clock_states_equal) {
@@ -2965,6 +2942,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_variable)
stream->vrr_active_variable = *update->vrr_active_variable;
+ if (update->vrr_active_fixed)
+ stream->vrr_active_fixed = *update->vrr_active_fixed;
+
if (update->crtc_timing_adjust)
stream->adjust = *update->crtc_timing_adjust;
@@ -3181,7 +3161,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
@@ -3269,6 +3249,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
+ } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
+ && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ /*
+ * Workaround for firmware issue in some receivers where they don't pick up
+ * correct output color space unless DP link is disabled/re-enabled
+ */
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -3299,6 +3286,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
&& stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
+ if (stream->link->replay_settings.config.replay_supported)
+ return true;
+
return false;
}
@@ -3309,7 +3299,6 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_state *context)
{
union dmub_rb_cmd cmd;
- struct dc_context *dc_ctx = dc->ctx;
struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
unsigned int i, j;
unsigned int panel_inst = 0;
@@ -3350,12 +3339,168 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
update_dirty_rect->panel_inst = panel_inst;
update_dirty_rect->pipe_idx = j;
- dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
}
}
+static void build_dmub_update_dirty_rect(
+ struct dc *dc,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ struct dc_state *context,
+ struct dc_dmub_cmd dc_dmub_cmd[],
+ unsigned int *dmub_cmd_count)
+{
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
+ unsigned int i, j;
+ unsigned int panel_inst = 0;
+
+ if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
+ return;
+
+ if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+ return;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
+ cmd.update_dirty_rect.header.sub_type = 0;
+ cmd.update_dirty_rect.header.payload_bytes =
+ sizeof(cmd.update_dirty_rect) -
+ sizeof(cmd.update_dirty_rect.header);
+ update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
+
+ if (!srf_updates[i].surface || !flip_addr)
+ continue;
+ /* Do not send in immediate flip mode */
+ if (srf_updates[i].surface->flip_immediate)
+ continue;
+ update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
+ memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
+ sizeof(flip_addr->dirty_rects));
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+ update_dirty_rect->panel_inst = panel_inst;
+ update_dirty_rect->pipe_idx = j;
+ dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
+ dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+ (*dmub_cmd_count)++;
+ }
+ }
+}
+
+
+/**
+ * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
+ *
+ * @dc: Current DC state
+ * @srf_updates: Array of surface updates
+ * @surface_count: Number of surfaces that have been updated
+ * @stream: Corresponding stream to be updated in the current flip
+ * @context: New DC state to be programmed
+ *
+ * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
+ * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
+ *
+ * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required
+ * to build an array of commands and have them sent while the OTG lock is acquired.
+ *
+ * Return: void
+ */
+static void build_dmub_cmd_list(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct dc_dmub_cmd dc_dmub_cmd[],
+ unsigned int *dmub_cmd_count)
+{
+ // Initialize cmd count to 0
+ *dmub_cmd_count = 0;
+ build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
+}
+
+static void commit_planes_for_stream_fast(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *top_pipe_to_program = NULL;
+ dc_z10_restore(dc);
+
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
+
+ if (dc->debug.visual_confirm) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state)
+ dc_update_viusal_confirm_color(dc, context, pipe);
+ }
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ /*set logical flag for lock/unlock use*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+ if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
+ pipe_ctx->plane_state->triplebuffer_flips = true;
+ }
+ }
+ }
+
+ build_dmub_cmd_list(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ context,
+ context->dc_dmub_cmd,
+ &(context->dmub_cmd_count));
+ hwss_build_fast_sequence(dc,
+ context->dc_dmub_cmd,
+ context->dmub_cmd_count,
+ context->block_sequence,
+ &(context->block_sequence_steps),
+ top_pipe_to_program);
+ hwss_execute_sequence(dc,
+ context->block_sequence,
+ context->block_sequence_steps);
+ /* Clear update flags so next flip doesn't have redundant programming
+ * (if there's no stream update, the update flags are not cleared).
+ * Surface updates are cleared unconditionally at the beginning of each flip,
+ * so no need to clear here.
+ */
+ if (top_pipe_to_program->stream)
+ top_pipe_to_program->stream->update_flags.raw = 0;
+}
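
The fast path added above has a build-then-execute shape: the work (DMCUB commands, then the per-pipe programming steps) is first collected into arrays on the state object, and the whole array is run back to back afterwards. The toy model below shows only that shape; it is not the DC hardware-sequencer API.

#include <stdio.h>

#define MAX_STEPS 8

typedef void (*step_fn)(int arg);

struct sequence { step_fn fn[MAX_STEPS]; int arg[MAX_STEPS]; unsigned int count; };

static void send_dmub_cmd(int id) { printf("send DMCUB cmd %d\n", id); }
static void flip_surface(int pipe) { printf("flip on pipe %d\n", pipe); }

static void seq_add(struct sequence *s, step_fn fn, int arg)
{
	if (s->count < MAX_STEPS) {
		s->fn[s->count] = fn;
		s->arg[s->count] = arg;
		s->count++;
	}
}

static void seq_execute(const struct sequence *s)
{
	unsigned int i;

	for (i = 0; i < s->count; i++)
		s->fn[i](s->arg[i]);	/* run every queued step back to back */
}

int main(void)
{
	struct sequence seq = { .count = 0 };

	seq_add(&seq, send_dmub_cmd, 3);	/* e.g. a dirty-rect update */
	seq_add(&seq, flip_surface, 0);
	seq_execute(&seq);
	return 0;
}
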
+
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3393,21 +3538,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
- /* Optimize seamless boot flag keeps clocks and watermarks high until
- * first flip. After first flip, optimization is required to lower
- * bandwidth. Important to note that it is expected UEFI will
- * only light up a single display on POST, therefore we only expect
- * one stream with seamless boot flag set.
- */
- if (stream->apply_seamless_boot_optimization) {
- stream->apply_seamless_boot_optimization = false;
-
- if (get_seamless_boot_stream_count(context) == 0)
- dc->optimized_required = true;
- }
- }
-
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
@@ -3420,16 +3550,9 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (!pipe_ctx->top_pipe &&
- !pipe_ctx->prev_odm_pipe &&
- pipe_ctx->stream &&
- pipe_ctx->stream == stream) {
- top_pipe_to_program = pipe_ctx;
- }
- }
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -3449,6 +3572,14 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+ if (dc->debug.visual_confirm)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state)
+ dc_update_viusal_confirm_color(dc, context, pipe);
+ }
+
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -3532,43 +3663,40 @@ static void commit_planes_for_stream(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP &&
+ if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
+ dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
pipe_ctx->stream && pipe_ctx->plane_state) {
- /* Only update visual confirm for SUBVP here.
+ /* Only update visual confirm for SUBVP and Mclk switching here.
* The bar appears on all pipes, so we need to update the bar on all displays,
* so the information doesn't get stale.
*/
- struct mpcc_blnd_cfg blnd_cfg = { 0 };
-
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color,
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
pipe_ctx->plane_res.hubp->inst);
}
}
}
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *plane_state = srf_updates[i].surface;
- /*set logical flag for lock/unlock use*/
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
- if (!pipe_ctx->plane_state)
- continue;
- if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
- continue;
- pipe_ctx->plane_state->triplebuffer_flips = false;
- if (update_type == UPDATE_TYPE_FAST &&
- dc->hwss.program_triplebuffer != NULL &&
- !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
- /*triple buffer for VUpdate only*/
- pipe_ctx->plane_state->triplebuffer_flips = true;
- }
- }
- if (update_type == UPDATE_TYPE_FULL) {
- /* force vsync flip when reconfiguring pipes to prevent underflow */
- plane_state->flip_immediate = false;
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ /*set logical flag for lock/unlock use*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+ if (!pipe_ctx->plane_state)
+ continue;
+ if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
+ pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
}
// Update Type FULL, Surface updates
@@ -3865,13 +3993,14 @@ static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_mpc_policy;
- bool temp_dynamic_odm_policy;
- bool temp_subvp_policy;
+ enum pipe_split_policy tmp_mpc_policy = 0;
+ bool temp_dynamic_odm_policy = 0;
+ bool temp_subvp_policy = 0;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
+ bool odm_in_use = false;
if (!transition_context)
return false;
@@ -3900,6 +4029,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
}
}
+ /* If ODM is enabled and we are adding or removing planes from any ODM
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->next_odm_pipe) {
+ odm_in_use = true;
+ break;
+ }
+ }
+
/* When the OS add a new surface if we have been used all of pipes with odm combine
* and mpc split feature, it need use commit_minimal_transition_state to transition safely.
* After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need
@@ -3908,7 +4049,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
* enter/exit MPO when DCN still have enough resources.
*/
- if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
dc_release_state(transition_context);
return true;
}
@@ -3972,6 +4113,161 @@ static bool commit_minimal_transition_state(struct dc *dc,
return true;
}
+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have an updated
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit sequence. This
+ * helper function will update the seamless boot flags on each flip (if required)
+ * outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+ struct dc_state *context,
+ int surface_count,
+ struct dc_stream_state *stream)
+{
+ if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth. Important to note that it is expected UEFI will
+ * only light up a single display on POST, therefore we only expect
+ * one stream with seamless boot flag set.
+ */
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->optimized_required = true;
+ }
+ }
+}
+
+static void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ int i = 0;
+
+ if (stream_update) {
+ fast_update[0].out_transfer_func = stream_update->out_transfer_func;
+ fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ fast_update[i].flip_addr = srf_updates[i].flip_addr;
+ fast_update[i].gamma = srf_updates[i].gamma;
+ fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
+ fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
+ fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+ }
+}
+
+static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].flip_addr ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].input_csc_color_matrix ||
+ fast_update[i].coeff_reduction_factor)
+ return true;
+ }
+
+ return false;
+}
+
+static bool full_update_required(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+
+ int i;
+ struct dc_stream_status *stream_status;
+ const struct dc_state *context = dc->current_state;
+
+ for (i = 0; i < surface_count; i++) {
+ if (srf_updates &&
+ (srf_updates[i].plane_info ||
+ srf_updates[i].scaling_info ||
+ (srf_updates[i].hdr_mult.value &&
+ srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
+ srf_updates[i].in_transfer_func ||
+ srf_updates[i].func_shaper ||
+ srf_updates[i].lut3d_func ||
+ srf_updates[i].blend_tf ||
+ srf_updates[i].surface->force_full_update ||
+ (srf_updates[i].flip_addr &&
+ srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ !is_surface_in_context(context, srf_updates[i].surface)))
+ return true;
+ }
+
+ if (stream_update &&
+ (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update) ||
+ stream_update->hdr_static_metadata ||
+ stream_update->abm_level ||
+ stream_update->periodic_interrupt ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->dpms_off ||
+ stream_update->allow_freesync ||
+ stream_update->vrr_active_variable ||
+ stream_update->vrr_active_fixed ||
+ stream_update->gamut_remap ||
+ stream_update->output_color_space ||
+ stream_update->dither_option ||
+ stream_update->wb_update ||
+ stream_update->dsc_config ||
+ stream_update->mst_bw_update ||
+ stream_update->func_shaper ||
+ stream_update->lut3d_func ||
+ stream_update->pending_test_pattern ||
+ stream_update->crtc_timing_adjust))
+ return true;
+
+ if (stream) {
+ stream_status = dc_stream_get_status(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ return false;
+}
+
+static bool fast_update_only(struct dc *dc,
+ struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+ return fast_updates_exist(fast_update, surface_count)
+ && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
+}
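
The path selection these helpers feed can be pictured with the standalone sketch below: a flip takes the new fast path only when at least one fast-capable field changed and nothing that demands revalidation did. The structures are trimmed stand-ins for the real update descriptors.

#include <stdbool.h>
#include <stdio.h>

struct fake_surface_update { bool flip_addr, gamma, scaling_info, plane_info; };

static bool fast_exists(const struct fake_surface_update *u)
{
	return u->flip_addr || u->gamma;
}

static bool full_required(const struct fake_surface_update *u)
{
	return u->scaling_info || u->plane_info;	/* needs bandwidth revalidation */
}

int main(void)
{
	struct fake_surface_update flip_only = { .flip_addr = true };
	struct fake_surface_update resize = { .flip_addr = true, .scaling_info = true };

	printf("flip-only -> %s path\n",
	       fast_exists(&flip_only) && !full_required(&flip_only) ? "fast" : "full");
	printf("resize    -> %s path\n",
	       fast_exists(&resize) && !full_required(&resize) ? "fast" : "full");
	return 0;
}
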
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -3981,14 +4277,16 @@ bool dc_update_planes_and_stream(struct dc *dc,
enum surface_update_type update_type;
int i;
struct mall_temp_config mall_temp_config;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting;
- bool is_plane_addition;
+ bool force_minimal_pipe_splitting = 0;
+ bool is_plane_addition = 0;
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
@@ -4038,14 +4336,26 @@ bool dc_update_planes_and_stream(struct dc *dc,
update_type = UPDATE_TYPE_FULL;
}
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
+ update_seamless_boot_flags(dc, context, surface_count, stream);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
if (dc->current_state != context) {
@@ -4085,7 +4395,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
int i, j;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4170,7 +4482,18 @@ void dc_commit_updates_for_stream(struct dc *dc,
TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
- commit_planes_for_stream(
+ update_seamless_boot_flags(dc, context, surface_count, stream);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ commit_planes_for_stream(
dc,
srf_updates,
surface_count,
@@ -4178,6 +4501,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
stream_update,
update_type,
context);
+ }
/* update current_state */
if (dc->current_state != context) {
@@ -4264,9 +4588,6 @@ void dc_set_power_state(
dc_z10_restore(dc);
- if (dc->ctx->dmub_srv)
- dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
-
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -4473,15 +4794,17 @@ static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memcl
*/
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
- uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
- unsigned int softMax, maxDPM, funcMin;
+ unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
bool p_state_change_support;
- if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
+ if (!dc->config.dc_mode_clk_limit_support)
return;
softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
- maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
+ for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
+ if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
+ maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
+ }
funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
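A worked example of the new maxDPM scan, with a made-up DPM table (values are illustrative only):

    /* hypothetical clk_table memclk_mhz entries: { 800, 1200, 1000 }
     * old code read only the last entry and would report maxDPM = 1000;
     * the loop above scans every entry and reports the true peak, 1200 */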
@@ -4606,7 +4929,6 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
{
uint8_t action;
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
ASSERT(payload->length <= 16);
@@ -4654,9 +4976,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
);
}
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -4700,7 +5020,6 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
struct dmub_notification *notify)
{
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
bool is_cmd_complete = true;
/* prepare SET_CONFIG command */
@@ -4711,7 +5030,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
- if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
+ if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
/* command is not processed by dmub */
notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
return is_cmd_complete;
@@ -4746,7 +5065,6 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t *mst_slots_in_use)
{
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
/* prepare MST_ALLOC_SLOTS command */
cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
@@ -4755,7 +5073,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
- if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
+ if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
/* command is not processed by dmub */
return DC_ERROR_UNEXPECTED;
@@ -4789,19 +5107,28 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable)
{
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}
/**
+ * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
+ *
+ * @dc: [in] dc structure
+ */
+void dc_print_dmub_diagnostic_data(const struct dc *dc)
+{
+ dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
+}
+
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
@@ -4832,6 +5159,9 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (link->psr_settings.psr_feature_enabled)
return;
+ if (link->replay_settings.replay_feature_enabled)
+ return;
+
/*find primary pipe associated with stream*/
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4861,20 +5191,69 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
-/**
- * dc_extended_blank_supported - Decide whether extended blank is supported
- *
- * @dc: [in] Current DC state
- *
- * Extended blank is a freesync optimization feature to be enabled in the
- * future. During the extra vblank period gained from freesync, we have the
- * ability to enter z9/z10.
+/*****************************************************************************
+ * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
+ * ABM
+ * @dc: dc structure
+ * @stream: stream to save or restore ABM state on
+ * @pData: abm hw states
*
- * Return:
- * Indicate whether extended blank is supported (%true or %false)
- */
-bool dc_extended_blank_supported(struct dc *dc)
+ ****************************************************************************/
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ /*find primary pipe associated with stream*/
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return false;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++)
+ if (edp_links[i] == link)
+ break;
+
+ if (i == edp_num)
+ return false;
+
+ if (pipe->stream_res.abm &&
+ pipe->stream_res.abm->funcs->save_restore)
+ return pipe->stream_res.abm->funcs->save_restore(
+ pipe->stream_res.abm,
+ i,
+ pData);
+ return false;
+}
+
+void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
- return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
- && dc->caps.zstate_support && dc->caps.is_apu;
+ unsigned int i;
+ bool subvp_in_use = false;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
+ subvp_in_use = true;
+ break;
+ }
+ }
+ properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
}
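A minimal usage sketch of the new query (props is a caller-side local; the 64-pixel limit comes from the SubVP branch above):

    struct dc_current_properties props = {0};

    dc_query_current_properties(dc, &props);
    /* props.cursor_size_limit == 64 when any stream uses SubVP,
     * otherwise it equals dc->caps.max_cursor_size */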
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 2acbf692193f..f99ec1b0efaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -27,6 +27,8 @@
#include "core_types.h"
#include "timing_generator.h"
#include "hw_sequencer.h"
+#include "hw_sequencer_private.h"
+#include "basics/dc_common.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
@@ -185,6 +187,7 @@ static bool is_ycbcr709_limited_type(
ret = true;
return ret;
}
+
static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
{
enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
@@ -421,6 +424,7 @@ void get_hdr_visual_confirm_color(
void get_subvp_visual_confirm_color(
struct dc *dc,
+ struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -428,15 +432,17 @@ void get_subvp_visual_confirm_color(
bool enable_subvp = false;
int i;
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx)
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context)
return;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
/* SubVP enable - red */
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
color->color_r_cr = color_value;
enable_subvp = true;
@@ -448,12 +454,374 @@ void get_subvp_visual_confirm_color(
if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
color->color_r_cr = 0;
- if (pipe_ctx->stream->ignore_msa_timing_param == 1)
+ if (pipe_ctx->stream->allow_freesync == 1) {
/* SubVP enable and DRR on - green */
+ color->color_b_cb = 0;
color->color_g_y = color_value;
- else
+ } else {
/* SubVP enable and No DRR - blue */
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ }
+ }
+}
+
+void hwss_build_fast_sequence(struct dc *dc,
+ struct dc_dmub_cmd *dc_dmub_cmd,
+ unsigned int dmub_cmd_count,
+ struct block_sequence block_sequence[],
+ int *num_steps,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane = pipe_ctx->plane_state;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *current_pipe = NULL;
+ struct pipe_ctx *current_mpc_pipe = NULL;
+ unsigned int i = 0;
+
+ *num_steps = 0; // Initialize to 0
+
+ if (!plane || !stream)
+ return;
+
+ if (dc->hwss.subvp_pipe_control_lock_fast) {
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
+ (*num_steps)++;
+ }
+ if (dc->hwss.pipe_control_lock) {
+ block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc;
+ block_sequence[*num_steps].params.pipe_control_lock_params.lock = true;
+ block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*num_steps)++;
+ }
+
+ for (i = 0; i < dmub_cmd_count; i++) {
+ block_sequence[*num_steps].params.send_dmcub_cmd_params.ctx = dc->ctx;
+ block_sequence[*num_steps].params.send_dmcub_cmd_params.cmd = &(dc_dmub_cmd[i].dmub_cmd);
+ block_sequence[*num_steps].params.send_dmcub_cmd_params.wait_type = dc_dmub_cmd[i].wait_type;
+ block_sequence[*num_steps].func = DMUB_SEND_DMCUB_CMD;
+ (*num_steps)++;
+ }
+
+ current_pipe = pipe_ctx;
+ while (current_pipe) {
+ current_mpc_pipe = current_pipe;
+ while (current_mpc_pipe) {
+ if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.raw) {
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
+ block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*num_steps)++;
+ }
+ if (dc->hwss.program_triplebuffer && dc->debug.enable_tri_buf && current_mpc_pipe->plane_state->update_flags.raw) {
+ block_sequence[*num_steps].params.program_triplebuffer_params.dc = dc;
+ block_sequence[*num_steps].params.program_triplebuffer_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.program_triplebuffer_params.enableTripleBuffer = current_mpc_pipe->plane_state->triplebuffer_flips;
+ block_sequence[*num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*num_steps)++;
+ }
+ if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
+ block_sequence[*num_steps].params.update_plane_addr_params.dc = dc;
+ block_sequence[*num_steps].params.update_plane_addr_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = HUBP_UPDATE_PLANE_ADDR;
+ (*num_steps)++;
+ }
+
+ if (hws->funcs.set_input_transfer_func && current_mpc_pipe->plane_state->update_flags.bits.gamma_change) {
+ block_sequence[*num_steps].params.set_input_transfer_func_params.dc = dc;
+ block_sequence[*num_steps].params.set_input_transfer_func_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_input_transfer_func_params.plane_state = current_mpc_pipe->plane_state;
+ block_sequence[*num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*num_steps)++;
+ }
+
+ if (dc->hwss.program_gamut_remap && current_mpc_pipe->plane_state->update_flags.bits.gamut_remap_change) {
+ block_sequence[*num_steps].params.program_gamut_remap_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*num_steps)++;
+ }
+ if (current_mpc_pipe->plane_state->update_flags.bits.input_csc_change) {
+ block_sequence[*num_steps].params.setup_dpp_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_SETUP_DPP;
+ (*num_steps)++;
+ }
+ if (current_mpc_pipe->plane_state->update_flags.bits.coeff_reduction_change) {
+ block_sequence[*num_steps].params.program_bias_and_scale_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*num_steps)++;
+ }
+ if (hws->funcs.set_output_transfer_func && current_mpc_pipe->stream->update_flags.bits.out_tf) {
+ block_sequence[*num_steps].params.set_output_transfer_func_params.dc = dc;
+ block_sequence[*num_steps].params.set_output_transfer_func_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_output_transfer_func_params.stream = current_mpc_pipe->stream;
+ block_sequence[*num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
+ (*num_steps)++;
+ }
+
+ if (current_mpc_pipe->stream->update_flags.bits.out_csc) {
+ block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpc = dc->res_pool->mpc;
+ block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = current_mpc_pipe->plane_res.hubp->inst;
+ block_sequence[*num_steps].params.power_on_mpc_mem_pwr_params.power_on = true;
+ block_sequence[*num_steps].func = MPC_POWER_ON_MPC_MEM_PWR;
+ (*num_steps)++;
+
+ if (current_mpc_pipe->stream->csc_color_matrix.enable_adjustment == true) {
+ block_sequence[*num_steps].params.set_output_csc_params.mpc = dc->res_pool->mpc;
+ block_sequence[*num_steps].params.set_output_csc_params.opp_id = current_mpc_pipe->stream_res.opp->inst;
+ block_sequence[*num_steps].params.set_output_csc_params.regval = current_mpc_pipe->stream->csc_color_matrix.matrix;
+ block_sequence[*num_steps].params.set_output_csc_params.ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ block_sequence[*num_steps].func = MPC_SET_OUTPUT_CSC;
+ (*num_steps)++;
+ } else {
+ block_sequence[*num_steps].params.set_ocsc_default_params.mpc = dc->res_pool->mpc;
+ block_sequence[*num_steps].params.set_ocsc_default_params.opp_id = current_mpc_pipe->stream_res.opp->inst;
+ block_sequence[*num_steps].params.set_ocsc_default_params.color_space = current_mpc_pipe->stream->output_color_space;
+ block_sequence[*num_steps].params.set_ocsc_default_params.ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
+ block_sequence[*num_steps].func = MPC_SET_OCSC_DEFAULT;
+ (*num_steps)++;
+ }
+ }
+ current_mpc_pipe = current_mpc_pipe->bottom_pipe;
+ }
+ current_pipe = current_pipe->next_odm_pipe;
+ }
+
+ if (dc->hwss.pipe_control_lock) {
+ block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc;
+ block_sequence[*num_steps].params.pipe_control_lock_params.lock = false;
+ block_sequence[*num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*num_steps)++;
+ }
+ if (dc->hwss.subvp_pipe_control_lock_fast) {
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
+ (*num_steps)++;
+ }
+
+ current_pipe = pipe_ctx;
+ while (current_pipe) {
+ current_mpc_pipe = current_pipe;
+
+ while (current_mpc_pipe) {
+ if (!current_mpc_pipe->bottom_pipe && !current_mpc_pipe->next_odm_pipe &&
+ current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
+ current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
+ !current_mpc_pipe->plane_state->skip_manual_trigger) {
+ block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
+ (*num_steps)++;
+ }
+ current_mpc_pipe = current_mpc_pipe->bottom_pipe;
+ }
+ current_pipe = current_pipe->next_odm_pipe;
+ }
+}
+
+void hwss_execute_sequence(struct dc *dc,
+ struct block_sequence block_sequence[],
+ int num_steps)
+{
+ unsigned int i;
+ union block_sequence_params *params;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ for (i = 0; i < num_steps; i++) {
+ params = &(block_sequence[i].params);
+ switch (block_sequence[i].func) {
+
+ case DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST:
+ dc->hwss.subvp_pipe_control_lock_fast(params);
+ break;
+ case OPTC_PIPE_CONTROL_LOCK:
+ dc->hwss.pipe_control_lock(params->pipe_control_lock_params.dc,
+ params->pipe_control_lock_params.pipe_ctx,
+ params->pipe_control_lock_params.lock);
+ break;
+ case HUBP_SET_FLIP_CONTROL_GSL:
+ dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx,
+ params->set_flip_control_gsl_params.flip_immediate);
+ break;
+ case HUBP_PROGRAM_TRIPLEBUFFER:
+ dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc,
+ params->program_triplebuffer_params.pipe_ctx,
+ params->program_triplebuffer_params.enableTripleBuffer);
+ break;
+ case HUBP_UPDATE_PLANE_ADDR:
+ dc->hwss.update_plane_addr(params->update_plane_addr_params.dc,
+ params->update_plane_addr_params.pipe_ctx);
+ break;
+ case DPP_SET_INPUT_TRANSFER_FUNC:
+ hws->funcs.set_input_transfer_func(params->set_input_transfer_func_params.dc,
+ params->set_input_transfer_func_params.pipe_ctx,
+ params->set_input_transfer_func_params.plane_state);
+ break;
+ case DPP_PROGRAM_GAMUT_REMAP:
+ dc->hwss.program_gamut_remap(params->program_gamut_remap_params.pipe_ctx);
+ break;
+ case DPP_SETUP_DPP:
+ hwss_setup_dpp(params);
+ break;
+ case DPP_PROGRAM_BIAS_AND_SCALE:
+ hwss_program_bias_and_scale(params);
+ break;
+ case OPTC_PROGRAM_MANUAL_TRIGGER:
+ hwss_program_manual_trigger(params);
+ break;
+ case DPP_SET_OUTPUT_TRANSFER_FUNC:
+ hws->funcs.set_output_transfer_func(params->set_output_transfer_func_params.dc,
+ params->set_output_transfer_func_params.pipe_ctx,
+ params->set_output_transfer_func_params.stream);
+ break;
+ case MPC_UPDATE_VISUAL_CONFIRM:
+ dc->hwss.update_visual_confirm_color(params->update_visual_confirm_params.dc,
+ params->update_visual_confirm_params.pipe_ctx,
+ params->update_visual_confirm_params.mpcc_id);
+ break;
+ case MPC_POWER_ON_MPC_MEM_PWR:
+ hwss_power_on_mpc_mem_pwr(params);
+ break;
+ case MPC_SET_OUTPUT_CSC:
+ hwss_set_output_csc(params);
+ break;
+ case MPC_SET_OCSC_DEFAULT:
+ hwss_set_ocsc_default(params);
+ break;
+ case DMUB_SEND_DMCUB_CMD:
+ hwss_send_dmcub_cmd(params);
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+ }
+}
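A rough sketch of how the fast commit path is expected to pair the two helpers above; the array sizes and the cmds/cmd_count/top_pipe names are placeholders, not the actual commit_planes_for_stream_fast bookkeeping:

    struct block_sequence seq[100];   /* placeholder capacity */
    struct dc_dmub_cmd cmds[10];      /* DMUB commands gathered earlier (placeholder) */
    unsigned int cmd_count = 0;
    int num_steps = 0;

    hwss_build_fast_sequence(dc, cmds, cmd_count, seq, &num_steps, top_pipe);
    hwss_execute_sequence(dc, seq, num_steps);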
+
+void hwss_send_dmcub_cmd(union block_sequence_params *params)
+{
+ struct dc_context *ctx = params->send_dmcub_cmd_params.ctx;
+ union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd;
+ enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type;
+
+ dm_execute_dmub_cmd(ctx, cmd, wait_type);
+}
+
+void hwss_program_manual_trigger(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx;
+
+ if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+}
+
+void hwss_setup_dpp(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->setup_dpp_params.pipe_ctx;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (dpp && dpp->funcs->dpp_setup) {
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+ }
+}
+
+void hwss_program_bias_and_scale(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->program_bias_and_scale_params.pipe_ctx;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dc_bias_and_scale bns_params = {0};
+
+ // TODO: for CNVC set scale and bias registers if necessary
+ build_prescale_params(&bns_params, plane_state);
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+}
+
+void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->power_on_mpc_mem_pwr_params.mpc;
+ int mpcc_id = params->power_on_mpc_mem_pwr_params.mpcc_id;
+ bool power_on = params->power_on_mpc_mem_pwr_params.power_on;
+
+ if (mpc->funcs->power_on_mpc_mem_pwr)
+ mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, power_on);
+}
+
+void hwss_set_output_csc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->set_output_csc_params.mpc;
+ int opp_id = params->set_output_csc_params.opp_id;
+ const uint16_t *matrix = params->set_output_csc_params.regval;
+ enum mpc_output_csc_mode ocsc_mode = params->set_output_csc_params.ocsc_mode;
+
+ if (mpc->funcs->set_output_csc != NULL)
+ mpc->funcs->set_output_csc(mpc,
+ opp_id,
+ matrix,
+ ocsc_mode);
+}
+
+void hwss_set_ocsc_default(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->set_ocsc_default_params.mpc;
+ int opp_id = params->set_ocsc_default_params.opp_id;
+ enum dc_color_space colorspace = params->set_ocsc_default_params.color_space;
+ enum mpc_output_csc_mode ocsc_mode = params->set_ocsc_default_params.ocsc_mode;
+
+ if (mpc->funcs->set_ocsc_default != NULL)
+ mpc->funcs->set_ocsc_default(mpc,
+ opp_id,
+ colorspace,
+ ocsc_mode);
+}
+
+void get_mclk_switch_visual_confirm_color(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+ return;
+
+ if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+ dm_dram_clock_change_unsupported) {
+ /* MCLK switching is supported */
+ if (!pipe_ctx->has_vactive_margin) {
+ /* In Vblank - yellow */
+ color->color_r_cr = color_value;
+ color->color_g_y = color_value;
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ /* FPO + Vblank - cyan */
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = color_value;
+ }
+ } else {
+ /* In Vactive - pink */
+ color->color_r_cr = color_value;
color->color_b_cb = color_value;
+ }
+ /* SubVP */
+ get_subvp_visual_confirm_color(dc, context, pipe_ctx, color);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 18e098568cb4..ed94187c2afa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -314,6 +314,24 @@ const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link)
return link->dc->link_srv->dp_get_verified_link_cap(link);
}
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link)
+{
+ if (dc_is_dp_signal(link->connector_signal)) {
+ if (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_DVI_DONGLE &&
+ link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE)
+ return DC_LINK_ENCODING_HDMI_TMDS;
+ else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+ DP_8b_10b_ENCODING)
+ return DC_LINK_ENCODING_DP_8b_10b;
+ else if (link->dc->link_srv->dp_get_encoding_format(&link->verified_link_cap) ==
+ DP_128b_132b_ENCODING)
+ return DC_LINK_ENCODING_DP_128b_132b;
+ } else if (dc_is_hdmi_signal(link->connector_signal)) {
+ }
+
+ return DC_LINK_ENCODING_UNSPECIFIED;
+}
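A quick summary of what the branches above return for a few scenarios (no new behavior, just reading the code):

    /* DP connector with an active DVI/HDMI dongle      -> DC_LINK_ENCODING_HDMI_TMDS
     * DP link whose verified cap is 8b/10b encoded     -> DC_LINK_ENCODING_DP_8b_10b
     * DP link whose verified cap is 128b/132b encoded  -> DC_LINK_ENCODING_DP_128b_132b
     * anything else (including plain HDMI for now)     -> DC_LINK_ENCODING_UNSPECIFIED */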
+
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
return link->dc->link_srv->dp_is_sink_present(link);
@@ -449,6 +467,11 @@ bool dc_link_setup_psr(struct dc_link *link,
return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
}
+bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
+{
+ return link->dc->link_srv->edp_get_replay_state(link, state);
+}
+
bool dc_link_wait_for_t12(struct dc_link *link)
{
return link->dc->link_srv->edp_wait_for_t12(link);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 117d80cb36fb..f7b51aca6020 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -45,6 +45,8 @@
#include "link/hwss/link_hwss_dio.h"
#include "link/hwss/link_hwss_dpia.h"
#include "link/hwss/link_hwss_hpo_dp.h"
+#include "link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h"
+#include "link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -69,8 +71,20 @@
#include "../dcn32/dcn32_resource.h"
#include "../dcn321/dcn321_resource.h"
+#define VISUAL_CONFIRM_BASE_DEFAULT 3
+#define VISUAL_CONFIRM_BASE_MIN 1
+#define VISUAL_CONFIRM_BASE_MAX 10
+/* We choose 240 because it is a common divisor of common vertical addressable
+ * heights such as 2160, 1440, 1200 and 960. Taking 1/240 of the vertical
+ * addressable height as the visual confirm DPP offset height keeps the visual
+ * confirm height roughly constant regardless of the timing used.
+ */
+#define VISUAL_CONFIRM_DPP_OFFSET_DENO 240
+
#define DC_LOGGER_INIT(logger)
+#define UNABLE_TO_SPLIT -1
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -718,10 +732,10 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
-int get_num_mpc_splits(struct pipe_ctx *pipe)
+int resource_get_num_mpc_splits(const struct pipe_ctx *pipe)
{
int mpc_split_count = 0;
- struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+ const struct pipe_ctx *other_pipe = pipe->bottom_pipe;
while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
mpc_split_count++;
@@ -736,48 +750,46 @@ int get_num_mpc_splits(struct pipe_ctx *pipe)
return mpc_split_count;
}
-int get_num_odm_splits(struct pipe_ctx *pipe)
+int resource_get_num_odm_splits(const struct pipe_ctx *pipe)
{
int odm_split_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
- while (next_pipe) {
- odm_split_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
+
+ pipe = resource_get_otg_master(pipe);
+
+ while (pipe->next_odm_pipe) {
odm_split_count++;
- pipe = pipe->prev_odm_pipe;
+ pipe = pipe->next_odm_pipe;
}
return odm_split_count;
}
-static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+static int get_odm_split_index(struct pipe_ctx *pipe_ctx)
{
- *split_count = get_num_odm_splits(pipe_ctx);
- *split_idx = 0;
- if (*split_count == 0) {
- /*Check for mpc split*/
- struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ int index = 0;
- *split_count = get_num_mpc_splits(pipe_ctx);
- while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
- (*split_idx)++;
- split_pipe = split_pipe->top_pipe;
- }
+ pipe_ctx = resource_get_opp_head(pipe_ctx);
+ if (!pipe_ctx)
+ return 0;
- /* MPO window on right side of ODM split */
- if (split_pipe && split_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe)
- (*split_idx)++;
- } else {
- /*Get odm split index*/
- struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+ while (pipe_ctx->prev_odm_pipe) {
+ index++;
+ pipe_ctx = pipe_ctx->prev_odm_pipe;
+ }
- while (split_pipe) {
- (*split_idx)++;
- split_pipe = split_pipe->prev_odm_pipe;
- }
+ return index;
+}
+
+static int get_mpc_split_index(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ int index = 0;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ index++;
+ split_pipe = split_pipe->top_pipe;
}
+
+ return index;
}
/*
@@ -799,82 +811,366 @@ static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
}
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx)
+static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
- const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect surf_clip = plane_state->clip_rect;
- bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- int split_count, split_idx;
+ struct rect rec;
+ int r0_x_end = r0->x + r0->width;
+ int r1_x_end = r1->x + r1->width;
+ int r0_y_end = r0->y + r0->height;
+ int r1_y_end = r1->y + r1->height;
+
+ rec.x = r0->x > r1->x ? r0->x : r1->x;
+ rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x;
+ rec.y = r0->y > r1->y ? r0->y : r1->y;
+ rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y;
+
+ /* in case there is no intersection */
+ if (rec.width < 0 || rec.height < 0)
+ memset(&rec, 0, sizeof(rec));
+
+ return rec;
+}
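A worked example for intersect_rec() with made-up rectangles:

    /* r0 = { .x = 0,    .y = 0,    .width = 1920, .height = 1080 }
     * r1 = { .x = 1800, .y = -100, .width = 400,  .height = 400  }
     * result = { .x = 1800, .y = 0, .width = 120, .height = 300 }
     * disjoint inputs produce a zeroed rect */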
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
- split_idx = 0;
+static struct rect shift_rec(const struct rect *rec_in, int x, int y)
+{
+ struct rect rec_out = *rec_in;
+ rec_out.x += x;
+ rec_out.y += y;
+
+ return rec_out;
+}
+
+static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ int odm_slice_count = resource_get_num_odm_splits(pipe_ctx) + 1;
+ int odm_slice_idx = get_odm_split_index(pipe_ctx);
+ bool is_last_odm_slice = (odm_slice_idx + 1) == odm_slice_count;
+ int h_active = stream->timing.h_addressable +
+ stream->timing.h_border_left +
+ stream->timing.h_border_right;
+ int odm_slice_width = h_active / odm_slice_count;
+ struct rect odm_rec;
+
+ odm_rec.x = odm_slice_width * odm_slice_idx;
+ odm_rec.width = is_last_odm_slice ?
+ /* last slice width is the remainder of h_active */
+ h_active - odm_slice_width * (odm_slice_count - 1) :
+ /* odm slice width is the floor of h_active / count */
+ odm_slice_width;
+ odm_rec.y = 0;
+ odm_rec.height = stream->timing.v_addressable +
+ stream->timing.v_border_bottom +
+ stream->timing.v_border_top;
+
+ return odm_rec;
+}
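Worked example: the last ODM slice absorbs the integer-division remainder of h_active. Assuming h_active = 3841 and two ODM slices:

    /* odm_slice_width = 3841 / 2 = 1920
     * slice 0: x = 0,    width = 1920
     * slice 1: x = 1920, width = 3841 - 1920 = 1921 (last slice takes the remainder) */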
+
+static struct rect calculate_plane_rec_in_timing_active(
+ struct pipe_ctx *pipe_ctx,
+ const struct rect *rec_in)
+{
/*
- * Only the leftmost ODM pipe should be offset by a nonzero distance
+ * The following diagram shows an example where we map a 1920x1200
+ * desktop to a 2560x1440 timing with a plane rect in the middle
+ * of the screen. To map a plane rect from Stream Source to Timing
+ * Active space, we first scale the plane's x and y by the stream
+ * scaling ratios (i.e. 2304/1920 horizontal and 1440/1200 vertical),
+ * then add the stream destination offsets (i.e. 128 horizontal, 0
+ * vertical). This gives the plane rect's position in Timing Active,
+ * but with fractional parts. The rule is that we compute the left/right
+ * and top/bottom positions and round each value to the nearest integer.
+ *
+ * Stream Source Space
+ * ------------
+ * __________________________________________________
+ * |Stream Source (1920 x 1200) ^ |
+ * | y |
+ * | <------- w --------|> |
+ * | __________________V |
+ * |<-- x -->|Plane//////////////| ^ |
+ * | |(pre scale)////////| | |
+ * | |///////////////////| | |
+ * | |///////////////////| h |
+ * | |///////////////////| | |
+ * | |///////////////////| | |
+ * | |///////////////////| V |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ *
+ * Timing Active Space
+ * ---------------------------------
+ *
+ * Timing Active (2560 x 1440)
+ * __________________________________________________
+ * |*****| Stream Destination (2304 x 1440) |*****|
+ * |*****| |*****|
+ * |<128>| |*****|
+ * |*****| __________________ |*****|
+ * |*****| |Plane/////////////| |*****|
+ * |*****| |(post scale)//////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |//////////////////| |*****|
+ * |*****| |*****|
+ * |*****| |*****|
+ * |*****| |*****|
+ * |*****|______________________________________|*****|
+ *
+ * So the resulting formulas are shown below:
+ *
+ * recout_x = 128 + round(plane_x * 2304 / 1920)
+ * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
+ * recout_y = 0 + round(plane_y * 1440 / 1200)
+ * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
+ *
+ * NOTE: fixed point division is not error free. To reduce errors
+ * introduced by fixed point division, we divide only after
+ * multiplication is complete.
*/
- if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe && !pipe_ctx->prev_odm_pipe) {
- /* MPO window on right side of ODM split */
- data->recout.x = stream->dst.x + (surf_clip.x - stream->src.x - stream->src.width/2) *
- stream->dst.width / stream->src.width;
- } else if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
- data->recout.x = stream->dst.x;
- if (stream->src.x < surf_clip.x)
- data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
- / stream->src.width;
- } else
- data->recout.x = 0;
-
- if (stream->src.x > surf_clip.x)
- surf_clip.width -= stream->src.x - surf_clip.x;
- data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
- if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
- data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
-
- data->recout.y = stream->dst.y;
- if (stream->src.y < surf_clip.y)
- data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
- / stream->src.height;
- else if (stream->src.y > surf_clip.y)
- surf_clip.height -= stream->src.y - surf_clip.y;
-
- data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
- if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
- data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
-
- /* Handle h & v split */
- if (split_tb) {
- ASSERT(data->recout.height % 2 == 0);
- data->recout.height /= 2;
- } else if (split_count) {
- if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
- /* extra pixels in the division remainder need to go to pipes after
- * the extra pixel index minus one(epimo) defined here as:
- */
- int epimo = split_count - data->recout.width % (split_count + 1);
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect rec_out = {0};
+ struct fixed31_32 temp;
- data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
- if (split_idx > epimo)
- data->recout.x += split_idx - epimo - 1;
- ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
- data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
- } else {
- /* odm */
- if (split_idx == split_count) {
- /* rightmost pipe is the remainder recout */
- data->recout.width -= data->h_active * split_count - data->recout.x;
-
- /* ODM combine cases with MPO we can get negative widths */
- if (data->recout.width < 0)
- data->recout.width = 0;
-
- data->recout.x = 0;
- } else
- data->recout.width = data->h_active - data->recout.x;
- }
+ temp = dc_fixpt_from_fraction(rec_in->x * stream->dst.width,
+ stream->src.width);
+ rec_out.x = stream->dst.x + dc_fixpt_round(temp);
+
+ temp = dc_fixpt_from_fraction(
+ (rec_in->x + rec_in->width) * stream->dst.width,
+ stream->src.width);
+ rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
+
+ temp = dc_fixpt_from_fraction(rec_in->y * stream->dst.height,
+ stream->src.height);
+ rec_out.y = stream->dst.y + dc_fixpt_round(temp);
+
+ temp = dc_fixpt_from_fraction(
+ (rec_in->y + rec_in->height) * stream->dst.height,
+ stream->src.height);
+ rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
+
+ return rec_out;
+}
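Plugging the example from the comment above into these formulas (1920x1200 stream source, 2304x1440 destination at x = 128, and a hypothetical plane clip at x = 100, y = 50, 800x600):

    /* rec_out.x      = 128 + round(100 * 2304 / 1920)       = 248
     * rec_out.width  = 128 + round(900 * 2304 / 1920) - 248 = 960
     * rec_out.y      =   0 + round(50  * 1440 / 1200)       = 60
     * rec_out.height =   0 + round(650 * 1440 / 1200) - 60  = 720 */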
+
+static struct rect calculate_mpc_slice_in_timing_active(
+ struct pipe_ctx *pipe_ctx,
+ struct rect *plane_clip_rec)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ int mpc_slice_count = resource_get_num_mpc_splits(pipe_ctx) + 1;
+ int mpc_slice_idx = get_mpc_split_index(pipe_ctx);
+ int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
+ struct rect mpc_rec;
+
+ mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ ASSERT(mpc_slice_count == 1 ||
+ stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ mpc_rec.width % 2 == 0);
+
+ /* Extra pixels in the division remainder need to go to the slices after
+ * the "extra pixel index minus one" (epimo) computed above.
+ */
+ if (mpc_slice_idx > epimo) {
+ mpc_rec.x += mpc_slice_idx - epimo - 1;
+ mpc_rec.width += 1;
}
+
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ ASSERT(mpc_rec.height % 2 == 0);
+ mpc_rec.height /= 2;
+ }
+ return mpc_rec;
+}
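A worked example of the epimo remainder distribution, assuming a 1000-pixel-wide plane clip split across three MPC slices:

    /* base width = 1000 / 3 = 333, epimo = 3 - 1000 % 3 - 1 = 1
     * slice 0: x = 0,   width = 333
     * slice 1: x = 333, width = 333
     * slice 2: x = 666, width = 334   (index > epimo, so it takes the extra pixel)
     * total = 333 + 333 + 334 = 1000 */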
+
+static void adjust_recout_for_visual_confirm(struct rect *recout,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ int dpp_offset, base_offset;
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
+ return;
+
+ dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
+ dpp_offset *= pipe_ctx->plane_res.dpp->inst;
+
+ if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) &&
+ dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
+ base_offset = dc->debug.visual_confirm_rect_height;
+ else
+ base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
+
+ recout->height -= base_offset;
+ recout->height -= dpp_offset;
+}
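Worked example, assuming a 2160-line timing, the default 3-pixel base height and DPP instance 2:

    /* dpp_offset  = (2160 / 240) * 2 = 18
     * base_offset = VISUAL_CONFIRM_BASE_DEFAULT = 3
     * recout->height shrinks by 3 + 18 = 21 lines, so each DPP's visual
     * confirm bar ends up at a distinct vertical position */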
+
+/*
+ * The function maps a plane clip from Stream Source Space to ODM Slice Space
+ * and calculates the rec of the overlapping area between the MPC slice of
+ * the plane clip, the ODM slice associated with the pipe context and the
+ * stream destination rec.
+ */
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
+{
+ /*
+ * A plane clip represents the desired plane size and position in Stream
+ * Source Space. Stream Source is the destination where all planes are
+ * blended (i.e. positioned, scaled and overlaid). It is a canvas where
+ * all planes associated with the current stream are drawn together.
+ * After Stream Source is completed, we will further scale and
+ * reposition the entire canvas of the stream source to Stream
+ * Destination in Timing Active Space. This could be due to display
+ * overscan adjustment where we will need to rescale and reposition all
+ * the planes so they can fit into a TV with overscan or downscale
+ * upscale features such as GPU scaling or VSR.
+ *
+ * This two step blending is a virtual procedure in software. In
+ * hardware there is no such thing as Stream Source; all planes are
+ * blended once in Timing Active Space. Software virtualizes a Stream
+ * Source space to decouple the math complexity so the scaling parameter
+ * calculation focuses on one step at a time.
+ *
+ * In the following two diagrams, user applied 10% overscan adjustment
+ * so the Stream Source needs to be scaled down a little before mapping
+ * to Timing Active Space. As a result the Plane Clip is also scaled
+ * down by the same ratio, Plane Clip position (i.e. x and y) with
+ * respect to Stream Source is also scaled down. To map it in Timing
+ * Active Space additional x and y offsets from Stream Destination are
+ * added to Plane Clip as well.
+ *
+ * Stream Source Space
+ * ------------
+ * __________________________________________________
+ * |Stream Source (3840 x 2160) ^ |
+ * | y |
+ * | | |
+ * | __________________V |
+ * |<-- x -->|Plane Clip/////////| |
+ * | |(pre scale)////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |///////////////////| |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ *
+ * Timing Active Space (3840 x 2160)
+ * ---------------------------------
+ *
+ * Timing Active
+ * __________________________________________________
+ * | y_____________________________________________ |
+ * |x |Stream Destination (3456 x 1944) | |
+ * | | | |
+ * | | __________________ | |
+ * | | |Plane Clip////////| | |
+ * | | |(post scale)//////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | |//////////////////| | |
+ * | | | |
+ * | | | |
+ * | |____________________________________________| |
+ * |__________________________________________________|
+ *
+ *
+ * In Timing Active Space a plane clip could be further sliced into
+ * pieces called MPC slices. Each Pipe Context is responsible for
+ * processing only one MPC slice so the plane processing workload can be
+ * distributed to multiple DPP Pipes. MPC slices could be blended
+ * together to a single ODM slice. Each ODM slice is responsible for
+ * processing a portion of Timing Active divided horizontally so the
+ * output pixel processing workload can be distributed to multiple OPP
+ * pipes. All ODM slices are mapped together in the ODM block, so MPC
+ * slices belonging to different ODM slices can be pieced together to
+ * form a single image in Timing Active. An MPC slice must belong to a
+ * single ODM slice; if an MPC slice crosses an ODM slice boundary, it
+ * needs to be divided into two MPC slices, one for each ODM slice.
+ *
+ * In the following diagram the output pixel processing workload is
+ * divided horizontally into two ODM slices one for each OPP blend tree.
+ * OPP0 blend tree is responsible for processing left half of Timing
+ * Active, while OPP2 blend tree is responsible for processing right
+ * half.
+ *
+ * The plane has two MPC slices. However since the right MPC slice goes
+ * across ODM boundary, two DPP pipes are needed one for each OPP blend
+ * tree. (i.e. DPP1 for OPP0 blend tree and DPP2 for OPP2 blend tree).
+ *
+ * Assuming that we have a Pipe Context associated with OPP0 and DPP1
+ * working on processing the plane in the diagram. We want to know the
+ * width and height of the shaded rectangle and its relative position
+ * with respect to the ODM slice0. This is called the recout of the pipe
+ * context.
+ *
+ * Planes can be at arbitrary size and position and there could be an
+ * arbitrary number of MPC and ODM slices. The algorithm needs to take
+ * all scenarios into account.
+ *
+ * Timing Active Space (3840 x 2160)
+ * ---------------------------------
+ *
+ * Timing Active
+ * __________________________________________________
+ * |OPP0(ODM slice0)^ |OPP2(ODM slice1) |
+ * | y | |
+ * | | <- w -> |
+ * | _____V________|____ |
+ * | |DPP0 ^ |DPP1 |DPP2| |
+ * |<------ x |-----|->|/////| | |
+ * | | | |/////| | |
+ * | | h |/////| | |
+ * | | | |/////| | |
+ * | |_____V__|/////|____| |
+ * | | |
+ * | | |
+ * | | |
+ * |_________________________|________________________|
+ *
+ *
+ */
+ struct rect plane_clip;
+ struct rect mpc_slice_of_plane_clip;
+ struct rect odm_slice;
+ struct rect overlapping_area;
+
+ plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx,
+ &pipe_ctx->plane_state->clip_rect);
+ /* guard plane clip from drawing beyond stream dst here */
+ plane_clip = intersect_rec(&plane_clip,
+ &pipe_ctx->stream->dst);
+ mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active(
+ pipe_ctx, &plane_clip);
+ odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
+ overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice);
+ if (overlapping_area.height > 0 &&
+ overlapping_area.width > 0) {
+ /* shift the overlapping area so it is with respect to current
+ * ODM slice's position
+ */
+ pipe_ctx->plane_res.scl_data.recout = shift_rec(
+ &overlapping_area,
+ -odm_slice.x, -odm_slice.y);
+ adjust_recout_for_visual_confirm(
+ &pipe_ctx->plane_res.scl_data.recout,
+ pipe_ctx);
+ } else {
+ /* if there is no overlap, zero recout */
+ memset(&pipe_ctx->plane_res.scl_data.recout, 0,
+ sizeof(struct rect));
+ }
+
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -996,33 +1292,30 @@ static void calculate_init_and_vp(
static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = plane_state->src_rect;
+ struct rect recout_dst_in_active_timing;
+ struct rect recout_clip_in_active_timing;
+ struct rect recout_clip_in_recout_dst;
+ struct rect overlap_in_active_timing;
+ struct rect odm_slice = calculate_odm_slice_in_timing_active(pipe_ctx);
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- /*
- * recout full is what the recout would have been if we didnt clip
- * the source plane at all. We only care about left(ro_lb) and top(ro_tb)
- * offsets of recout within recout full because those are the directions
- * we scan from and therefore the only ones that affect inits.
- */
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height;
- if (pipe_ctx->prev_odm_pipe && split_idx)
- ro_lb = data->h_active * split_idx - recout_full_x;
- else if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->prev_odm_pipe)
- ro_lb = data->h_active * split_idx - recout_full_x + data->recout.x;
+ recout_clip_in_active_timing = shift_rec(
+ &data->recout, odm_slice.x, odm_slice.y);
+ recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
+ pipe_ctx, &plane_state->dst_rect);
+ overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
+ &recout_dst_in_active_timing);
+ if (overlap_in_active_timing.width > 0 &&
+ overlap_in_active_timing.height > 0)
+ recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing,
+ -recout_dst_in_active_timing.x,
+ -recout_dst_in_active_timing.y);
else
- ro_lb = data->recout.x - recout_full_x;
- ro_tb = data->recout.y - recout_full_y;
- ASSERT(ro_lb >= 0 && ro_tb >= 0);
+ memset(&recout_clip_in_recout_dst, 0, sizeof(struct rect));
/*
* Work in recout rotation since that requires less transformations
@@ -1041,7 +1334,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
calculate_init_and_vp(
flip_horz_scan_dir,
- ro_lb,
+ recout_clip_in_recout_dst.x,
data->recout.width,
src.width,
data->taps.h_taps,
@@ -1051,7 +1344,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport.width);
calculate_init_and_vp(
flip_horz_scan_dir,
- ro_lb,
+ recout_clip_in_recout_dst.x,
data->recout.width,
src.width / vpc_div,
data->taps.h_taps_c,
@@ -1061,7 +1354,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport_c.width);
calculate_init_and_vp(
flip_vert_scan_dir,
- ro_tb,
+ recout_clip_in_recout_dst.y,
data->recout.height,
src.height,
data->taps.v_taps,
@@ -1071,7 +1364,7 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
&data->viewport.height);
calculate_init_and_vp(
flip_vert_scan_dir,
- ro_tb,
+ recout_clip_in_recout_dst.y,
data->recout.height,
src.height / vpc_div,
data->taps.v_taps_c,
@@ -1096,6 +1389,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx);
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -1120,30 +1414,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dst.y += timing->v_border_top;
/* Calculate H and V active size */
- pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
- timing->h_border_left + timing->h_border_right;
- pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
- timing->v_border_top + timing->v_border_bottom;
- if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe) {
- pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
-
- DC_LOG_SCALER("%s pipe %d: next_odm_pipe:%d prev_odm_pipe:%d\n",
- __func__,
- pipe_ctx->pipe_idx,
- pipe_ctx->next_odm_pipe ? pipe_ctx->next_odm_pipe->pipe_idx : -1,
- pipe_ctx->prev_odm_pipe ? pipe_ctx->prev_odm_pipe->pipe_idx : -1);
- } /* ODM + windows MPO, where window is on either right or left ODM half */
- else if (pipe_ctx->top_pipe && (pipe_ctx->top_pipe->next_odm_pipe || pipe_ctx->top_pipe->prev_odm_pipe)) {
-
- pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx->top_pipe) + 1;
-
- DC_LOG_SCALER("%s ODM + windows MPO: pipe:%d top_pipe:%d top_pipe->next_odm_pipe:%d top_pipe->prev_odm_pipe:%d\n",
- __func__,
- pipe_ctx->pipe_idx,
- pipe_ctx->top_pipe->pipe_idx,
- pipe_ctx->top_pipe->next_odm_pipe ? pipe_ctx->top_pipe->next_odm_pipe->pipe_idx : -1,
- pipe_ctx->top_pipe->prev_odm_pipe ? pipe_ctx->top_pipe->prev_odm_pipe->pipe_idx : -1);
- }
+ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width;
+ pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height;
+
/* depends on h_active */
calculate_recout(pipe_ctx);
/* depends on pixel format */
@@ -1225,17 +1498,12 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
}
- if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
- pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- res = false;
- } else {
- /* Clamp minimum viewport size */
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
- pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
- if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
- }
+ /* Clamp minimum viewport size */
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
+ if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
+
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1287,7 +1555,7 @@ enum dc_status resource_build_scaling_params_for_context(
return DC_OK;
}
-struct pipe_ctx *find_idle_secondary_pipe(
+struct pipe_ctx *resource_find_free_secondary_pipe_legacy(
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -1347,73 +1615,182 @@ struct pipe_ctx *find_idle_secondary_pipe(
return secondary_pipe;
}
-struct pipe_ctx *resource_get_head_pipe_for_stream(
- struct resource_context *res_ctx,
- struct dc_stream_state *stream)
+int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct pipe_ctx *cur_opp_head)
{
+ const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe;
+ struct pipe_ctx *new_pipe;
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+
+ while (cur_sec_dpp) {
+ /* find a free pipe used in current opp blend tree,
+ * this is to avoid MPO pipe switching to different opp blending
+ * tree
+ */
+ new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx];
+ if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = cur_sec_dpp->pipe_idx;
+ break;
+ }
+ cur_sec_dpp = cur_sec_dpp->bottom_pipe;
+ }
+
+ return free_pipe_idx;
+}
+
+int recource_find_free_pipe_not_used_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
int i;
- for (i = 0; i < MAX_PIPES; i++) {
- if (res_ctx->pipe_ctx[i].stream == stream
- && !res_ctx->pipe_ctx[i].top_pipe
- && !res_ctx->pipe_ctx[i].prev_odm_pipe)
- return &res_ctx->pipe_ctx[i];
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, FREE_PIPE) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
}
- return NULL;
+
+ return free_pipe_idx;
}
-static struct pipe_ctx *resource_get_tail_pipe(
- struct resource_context *res_ctx,
- struct pipe_ctx *head_pipe)
+int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
{
- struct pipe_ctx *tail_pipe;
-
- tail_pipe = head_pipe->bottom_pipe;
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
- while (tail_pipe) {
- head_pipe = tail_pipe;
- tail_pipe = tail_pipe->bottom_pipe;
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
+ !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
+ resource_is_for_mpcc_combine(cur_pipe) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
}
- return head_pipe;
+ return free_pipe_idx;
}
-/*
- * A free_pipe for a stream is defined here as a pipe
- * that has no surface attached yet
- */
-static struct pipe_ctx *acquire_free_pipe_for_head(
- struct dc_state *context,
- const struct resource_pool *pool,
- struct pipe_ctx *head_pipe)
+int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe;
int i;
- struct resource_context *res_ctx = &context->res_ctx;
- if (!head_pipe->plane_state)
- return head_pipe;
+ for (i = 0; i < pool->pipe_count; i++) {
+ new_pipe = &new_res_ctx->pipe_ctx[i];
- /* Re-use pipe already acquired for this stream if available*/
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == head_pipe->stream &&
- !res_ctx->pipe_ctx[i].plane_state) {
- return &res_ctx->pipe_ctx[i];
+ if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
}
}
- /*
- * At this point we have no re-useable pipe for this stream and we need
- * to acquire an idle one to satisfy the request
+ return free_pipe_idx;
+}
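/* Illustrative sketch, not part of the patch: one way an ASIC-specific
 * resource pool could chain the free-pipe search helpers above to back its
 * acquire_free_pipe_as_secondary_dpp_pipe callback. The function name and
 * the fallback order are assumptions for the example only.
 */
static struct pipe_ctx *example_acquire_free_sec_dpp_pipe(
		const struct dc_state *cur_ctx,
		struct dc_state *new_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *opp_head)
{
	int i;

	/* prefer a pipe already in this OPP head's blend tree to avoid
	 * moving an MPO pipe to a different blending tree
	 */
	i = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
			&cur_ctx->res_ctx, &new_ctx->res_ctx, opp_head);
	if (i == FREE_PIPE_INDEX_NOT_FOUND)
		i = recource_find_free_pipe_not_used_in_cur_res_ctx(
				&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
	if (i == FREE_PIPE_INDEX_NOT_FOUND)
		i = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
				&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
	if (i == FREE_PIPE_INDEX_NOT_FOUND)
		i = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);

	return i == FREE_PIPE_INDEX_NOT_FOUND ?
			NULL : &new_ctx->res_ctx.pipe_ctx[i];
}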
+
+bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
+{
+#ifdef DBG
+ if (pipe_ctx->stream == NULL) {
+ /* a free pipe with dangling states */
+ ASSERT(!pipe_ctx->plane_state);
+ ASSERT(!pipe_ctx->prev_odm_pipe);
+ ASSERT(!pipe_ctx->next_odm_pipe);
+ ASSERT(!pipe_ctx->top_pipe);
+ ASSERT(!pipe_ctx->bottom_pipe);
+ } else if (pipe_ctx->top_pipe) {
+		/* a secondary DPP pipe must be assigned to a plane */
+		ASSERT(pipe_ctx->plane_state);
+ }
+ /* Add more checks here to prevent corrupted pipe ctx. It is very hard
+ * to debug this issue afterwards because we can't pinpoint the code
+ * location causing inconsistent pipe context states.
*/
+#endif
+ switch (type) {
+ case OTG_MASTER:
+ return !pipe_ctx->prev_odm_pipe &&
+ !pipe_ctx->top_pipe &&
+ pipe_ctx->stream;
+ case OPP_HEAD:
+ return !pipe_ctx->top_pipe && pipe_ctx->stream;
+ case DPP_PIPE:
+ return pipe_ctx->plane_state && pipe_ctx->stream;
+ case FREE_PIPE:
+ return !pipe_ctx->plane_state && !pipe_ctx->stream;
+ default:
+ return false;
+ }
+}
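/* Illustrative sketch, not part of the patch: the pipe classes checked above
 * are not mutually exclusive (an OTG master is also an OPP head, and an OPP
 * head carrying a plane is also a DPP pipe), so ordering the checks gives a
 * unique description of a pipe. The helper below is hypothetical.
 */
static const char *example_describe_pipe(const struct pipe_ctx *pipe)
{
	if (resource_is_pipe_type(pipe, FREE_PIPE))
		return "free pipe";
	if (resource_is_pipe_type(pipe, OTG_MASTER))
		return "OTG master (also an OPP head)";
	if (resource_is_pipe_type(pipe, OPP_HEAD))
		return "ODM secondary OPP head";
	if (resource_is_pipe_type(pipe, DPP_PIPE))
		return "MPC secondary DPP pipe";
	return "pipe with inconsistent state";
}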
- if (!pool->funcs->acquire_idle_pipe_for_layer) {
- if (!pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer)
- return NULL;
- else
- return pool->funcs->acquire_idle_pipe_for_head_pipe_in_layer(context, pool, head_pipe->stream, head_pipe);
+bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx)
+{
+ return resource_get_num_mpc_splits(pipe_ctx) > 0;
+}
+
+struct pipe_ctx *resource_get_otg_master_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ resource_is_pipe_type(&res_ctx->pipe_ctx[i], OTG_MASTER))
+ return &res_ctx->pipe_ctx[i];
}
+ return NULL;
+}
+
+struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *otg_master = resource_get_opp_head(pipe_ctx);
- return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream);
+ while (otg_master->prev_odm_pipe)
+ otg_master = otg_master->prev_odm_pipe;
+ return otg_master;
+}
+
+struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *opp_head = (struct pipe_ctx *) pipe_ctx;
+
+ ASSERT(!resource_is_pipe_type(opp_head, FREE_PIPE));
+ while (opp_head->top_pipe)
+ opp_head = opp_head->top_pipe;
+ return opp_head;
+}
+
+static struct pipe_ctx *get_tail_pipe(
+ struct pipe_ctx *head_pipe)
+{
+ struct pipe_ctx *tail_pipe = head_pipe->bottom_pipe;
+
+ while (tail_pipe) {
+ head_pipe = tail_pipe;
+ tail_pipe = tail_pipe->bottom_pipe;
+ }
+
+ return head_pipe;
}
static int acquire_first_split_pipe(
@@ -1448,253 +1825,124 @@ static int acquire_first_split_pipe(
return i;
}
}
- return -1;
+ return UNABLE_TO_SPLIT;
}
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
+static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe,
struct dc_plane_state *plane_state,
struct dc_state *context)
{
- int i;
- struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
- struct dc_stream_status *stream_status = NULL;
- struct pipe_ctx *prev_right_head = NULL;
- struct pipe_ctx *free_right_pipe = NULL;
- struct pipe_ctx *prev_left_head = NULL;
+ struct pipe_ctx *opp_head_pipe = otg_master_pipe;
- DC_LOGGER_INIT(stream->ctx->logger);
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
+ while (opp_head_pipe) {
+ if (opp_head_pipe->plane_state) {
+ ASSERT(0);
+ return false;
}
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to attach surface!\n");
- return false;
+ opp_head_pipe->plane_state = plane_state;
+ opp_head_pipe = opp_head_pipe->next_odm_pipe;
}
+ return true;
+}
- if (stream_status->plane_count == MAX_SURFACE_NUM) {
- dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
- return false;
+static void insert_secondary_dpp_pipe_with_plane(struct pipe_ctx *opp_head_pipe,
+ struct pipe_ctx *sec_pipe, struct dc_plane_state *plane_state)
+{
+ struct pipe_ctx *tail_pipe = get_tail_pipe(opp_head_pipe);
+
+ tail_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = tail_pipe;
+ if (tail_pipe->prev_odm_pipe) {
+ ASSERT(tail_pipe->prev_odm_pipe->bottom_pipe);
+ sec_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
+ tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = sec_pipe;
}
+ sec_pipe->plane_state = plane_state;
+}
- head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+/* For each OPP head pipe of an OTG master pipe, acquire a secondary DPP pipe
+ * and add the plane, so the plane is added to every MPC blend tree associated
+ * with that OTG master pipe.
+ */
+static bool acquire_secondary_dpp_pipes_and_add_plane(
+ struct pipe_ctx *otg_master_pipe,
+ struct dc_plane_state *plane_state,
+ struct dc_state *new_ctx,
+ struct dc_state *cur_ctx,
+ struct resource_pool *pool)
+{
+ struct pipe_ctx *opp_head_pipe, *sec_pipe;
- if (!head_pipe) {
- dm_error("Head pipe not found for stream_state %p !\n", stream);
+ if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe)
return false;
- }
- /* retain new surface, but only once per stream */
- dc_plane_state_retain(plane_state);
-
- while (head_pipe) {
- free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
+ opp_head_pipe = otg_master_pipe;
+ while (opp_head_pipe) {
+ sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
+ cur_ctx,
+ new_ctx,
+ pool,
+ opp_head_pipe);
+ if (!sec_pipe) {
+ /* try tearing down MPCC combine */
+ int pipe_idx = acquire_first_split_pipe(
+ &new_ctx->res_ctx, pool,
+ otg_master_pipe->stream);
- if (!free_pipe) {
- int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
- free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
}
- if (!free_pipe) {
- dc_plane_state_release(plane_state);
+ if (!sec_pipe)
return false;
- }
-
- free_pipe->plane_state = plane_state;
- if (head_pipe != free_pipe) {
- tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
- ASSERT(tail_pipe);
-
- /* ODM + window MPO, where MPO window is on right half only */
- if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) &&
- tail_pipe->next_odm_pipe) {
-
- /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on
- * the right side, then we will invalidate a 2nd one on the right side
- */
- if (head_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
- dc_plane_state_release(plane_state);
- return false;
- }
-
- DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d tail_pipe->next_odm_pipe:%d\n",
- __func__,
- free_pipe->pipe_idx,
- tail_pipe->next_odm_pipe ? tail_pipe->next_odm_pipe->pipe_idx : -1);
-
- /*
- * We want to avoid the case where the right side already has a pipe assigned to
- * it and is different from free_pipe ( which would cause trigger a pipe
- * reallocation ).
- * Check the old context to see if the right side already has a pipe allocated
- * - If not, continue to use free_pipe
- * - If the right side already has a pipe, use that pipe instead if its available
- */
-
- /*
- * We also want to avoid the case where with three plane ( 2 MPO videos ), we have
- * both videos on the left side so one of the videos is invalidated. Then we
- * move the invalidated video back to the right side. If the order of the plane
- * states is such that the right MPO plane is processed first, the free pipe
- * selected by the head will be the left MPO pipe. But since there was no right
- * MPO pipe, it will assign the free pipe to the right MPO pipe instead and
- * a pipe reallocation will occur.
- * Check the old context to see if the left side already has a pipe allocated
- * - If not, continue to use free_pipe
- * - If the left side is already using this pipe, then pick another pipe for right
- */
-
- prev_right_head = &dc->current_state->res_ctx.pipe_ctx[tail_pipe->next_odm_pipe->pipe_idx];
- if ((prev_right_head->bottom_pipe) &&
- (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
- free_right_pipe = acquire_free_pipe_for_head(context, pool, tail_pipe->next_odm_pipe);
- } else {
- prev_left_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->pipe_idx];
- if ((prev_left_head->bottom_pipe) &&
- (free_pipe->pipe_idx == prev_left_head->bottom_pipe->pipe_idx)) {
- free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
- }
- }
-
- if (free_right_pipe) {
- free_pipe->stream = NULL;
- memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
- memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
- free_pipe->plane_state = NULL;
- free_pipe->pipe_idx = 0;
- free_right_pipe->plane_state = plane_state;
- free_pipe = free_right_pipe;
- }
-
- free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg;
- free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm;
- free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = tail_pipe->next_odm_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = tail_pipe->next_odm_pipe->stream_res.audio;
- free_pipe->clock_source = tail_pipe->next_odm_pipe->clock_source;
-
- free_pipe->top_pipe = tail_pipe->next_odm_pipe;
- tail_pipe->next_odm_pipe->bottom_pipe = free_pipe;
- } else if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)
- && head_pipe->next_odm_pipe) {
-
- /* For ODM + window MPO, support 3 plane ( 2 MPO ) case.
- * Here we have a desktop ODM + left window MPO and a new MPO window appears
- * on the right side only. It fails the first case, because tail_pipe is the
- * left window MPO, so it has no next_odm_pipe. So in this scenario, we check
- * for head_pipe->next_odm_pipe instead
- */
- DC_LOG_SCALER("%s - ODM + win MPO (left) + win MPO (right). free_pipe:%d head_pipe->next_odm:%d\n",
- __func__,
- free_pipe->pipe_idx,
- head_pipe->next_odm_pipe ? head_pipe->next_odm_pipe->pipe_idx : -1);
-
- /*
- * We want to avoid the case where the right side already has a pipe assigned to
- * it and is different from free_pipe ( which would cause trigger a pipe
- * reallocation ).
- * Check the old context to see if the right side already has a pipe allocated
- * - If not, continue to use free_pipe
- * - If the right side already has a pipe, use that pipe instead if its available
- */
- prev_right_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->next_odm_pipe->pipe_idx];
- if ((prev_right_head->bottom_pipe) &&
- (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
- free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe->next_odm_pipe);
- if (free_right_pipe) {
- free_pipe->stream = NULL;
- memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
- memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
- free_pipe->plane_state = NULL;
- free_pipe->pipe_idx = 0;
- free_right_pipe->plane_state = plane_state;
- free_pipe = free_right_pipe;
- }
- }
-
- free_pipe->stream_res.tg = head_pipe->next_odm_pipe->stream_res.tg;
- free_pipe->stream_res.abm = head_pipe->next_odm_pipe->stream_res.abm;
- free_pipe->stream_res.opp = head_pipe->next_odm_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = head_pipe->next_odm_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = head_pipe->next_odm_pipe->stream_res.audio;
- free_pipe->clock_source = head_pipe->next_odm_pipe->clock_source;
-
- free_pipe->top_pipe = head_pipe->next_odm_pipe;
- head_pipe->next_odm_pipe->bottom_pipe = free_pipe;
- } else {
-
- /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on
- * the left side, then we will invalidate a 2nd one on the left side
- */
- if (head_pipe->next_odm_pipe && tail_pipe->top_pipe) {
- dc_plane_state_release(plane_state);
- return false;
- }
-
- free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
- free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
- free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
- free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
- free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
- free_pipe->clock_source = tail_pipe->clock_source;
-
- free_pipe->top_pipe = tail_pipe;
- tail_pipe->bottom_pipe = free_pipe;
-
- /* Connect MPO pipes together if MPO window is in the centre */
- if (!(free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
- free_pipe->stream->src.x + free_pipe->stream->src.width/2))) {
- if (!free_pipe->next_odm_pipe &&
- tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
- free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
- tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
- }
- if (!free_pipe->prev_odm_pipe &&
- tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
- free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
- tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
- }
- }
- }
- }
+ insert_secondary_dpp_pipe_with_plane(opp_head_pipe, sec_pipe,
+ plane_state);
+ opp_head_pipe = opp_head_pipe->next_odm_pipe;
+ }
+ return true;
+}
- /* ODM + window MPO, where MPO window is on left half only */
- if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
- free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
- DC_LOG_SCALER("%s - ODM + window MPO(left). free_pipe:%d\n",
- __func__,
- free_pipe->pipe_idx);
- break;
- }
- /* ODM + window MPO, where MPO window is on right half only */
- if (free_pipe->plane_state &&
- (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
- DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d\n",
- __func__,
- free_pipe->pipe_idx);
- break;
- }
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *otg_master_pipe;
+ struct dc_stream_status *stream_status = NULL;
+ bool added = false;
- head_pipe = head_pipe->next_odm_pipe;
+ stream_status = dc_stream_get_status_from_state(context, stream);
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ goto out;
+ } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ goto out;
}
- /* assign new surfaces*/
- stream_status->plane_states[stream_status->plane_count] = plane_state;
- stream_status->plane_count++;
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &context->res_ctx, stream);
+ if (otg_master_pipe->plane_state == NULL)
+ added = add_plane_to_opp_head_pipes(otg_master_pipe,
+ plane_state, context);
+ else
+ added = acquire_secondary_dpp_pipes_and_add_plane(
+ otg_master_pipe, plane_state, context,
+ dc->current_state, pool);
+ if (added) {
+ stream_status->plane_states[stream_status->plane_count] =
+ plane_state;
+ stream_status->plane_count++;
+ dc_plane_state_retain(plane_state);
+ }
- return true;
+out:
+ return added;
}
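/* Illustrative sketch, not part of the patch: the topology produced by the
 * rewritten dc_add_plane_to_context(). OPP heads hang off the OTG master via
 * next_odm_pipe (one per ODM slice); the first plane is carried by the OPP
 * heads themselves and every additional plane appends one secondary DPP pipe
 * to each head's bottom_pipe chain, so all slices blend the same planes and
 * every slice ends up with the same number of planes. The helper below is
 * hypothetical.
 */
static int example_count_planes_per_odm_slice(struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_head, *dpp;
	int per_slice = 0;

	for (opp_head = otg_master; opp_head; opp_head = opp_head->next_odm_pipe) {
		int n = 0;

		for (dpp = opp_head; dpp; dpp = dpp->bottom_pipe)
			if (dpp->plane_state)
				n++;

		per_slice = n;
	}
	return per_slice;
}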
bool dc_remove_plane_from_context(
@@ -1858,7 +2106,7 @@ bool dc_add_all_planes_for_stream(
return add_all_planes_for_stream(dc, stream, &set, 1, context);
}
-bool is_timing_changed(struct dc_stream_state *cur_stream,
+bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
if (cur_stream == NULL)
@@ -1883,7 +2131,7 @@ static bool are_stream_backends_same(
if (stream_a == NULL || stream_b == NULL)
return false;
- if (is_timing_changed(stream_a, stream_b))
+ if (dc_is_timing_changed(stream_a, stream_b))
return false;
if (stream_a->signal != stream_b->signal)
@@ -2198,7 +2446,7 @@ enum dc_status dc_remove_stream_from_ctx(
{
int i;
struct dc_context *dc_ctx = dc->ctx;
- struct pipe_ctx *del_pipe = resource_get_head_pipe_for_stream(&new_ctx->res_ctx, stream);
+ struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(&new_ctx->res_ctx, stream);
struct pipe_ctx *odm_pipe;
if (!del_pipe) {
@@ -3014,23 +3262,29 @@ static void set_avi_info_frame(
hdmi_info.bits.S0_S1 = scan_type;
/* C0, C1 : Colorimetry */
- if (color_space == COLOR_SPACE_YCBCR709 ||
- color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ switch (color_space) {
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
- else if (color_space == COLOR_SPACE_YCBCR601 ||
- color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601;
- else {
- hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
- }
- if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
- color_space == COLOR_SPACE_2020_YCBCR) {
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_YCBCR:
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
- } else if (color_space == COLOR_SPACE_ADOBERGB) {
+ break;
+ case COLOR_SPACE_ADOBERGB:
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
+ break;
+ case COLOR_SPACE_SRGB:
+ default:
+ hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
+ break;
}
if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
@@ -3446,7 +3700,7 @@ enum dc_status resource_map_clock_resources(
{
/* acquire new resources */
const struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
if (!pipe_ctx)
@@ -3508,7 +3762,7 @@ bool pipe_need_reprogram(
if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
return true;
- if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
@@ -3836,10 +4090,7 @@ void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx_old->stream)
- continue;
-
- if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe_ctx_old, OTG_MASTER))
continue;
if (!pipe_ctx->stream ||
@@ -3963,11 +4214,13 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
* with an hpo encoder. Or we can return a very dummy one that doesn't
* do work for all functions
*/
- return get_hpo_dp_link_hwss();
+ return (requires_fixed_vs_pe_retimer_hpo_link_hwss(link) ?
+ get_hpo_fixed_vs_pe_retimer_dp_link_hwss() : get_hpo_dp_link_hwss());
else if (can_use_dpia_link_hwss(link, link_res))
return get_dpia_link_hwss();
else if (can_use_dio_link_hwss(link, link_res))
- return get_dio_link_hwss();
+ return (requires_fixed_vs_pe_retimer_dio_link_hwss(link)) ?
+ get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss();
else
return get_virtual_link_hwss();
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 72b261ad9587..01fe2d2fd241 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -71,8 +71,7 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
/* Copy audio modes */
/* TODO - Remove this translation */
- for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
- {
+ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) {
stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
@@ -276,8 +275,8 @@ static void program_cursor_attributes(
}
dc->hwss.set_cursor_attribute(pipe_ctx);
-
- dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
+ if (dc->ctx->dmub_srv)
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
@@ -306,6 +305,32 @@ bool dc_optimize_timing_for_fsft(
}
#endif
+static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
+{
+ uint32_t refresh_rate;
+ struct dc *dc = stream->ctx->dc;
+
+ refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
+ stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
+
+ /* If there's any stream that fits the SubVP high refresh criteria,
+ * we must return true. This is because cursor updates are asynchronous
+ * with full updates, so we could transition into a SubVP config and
+	 * remain in HW cursor mode if there's no cursor update, which would
+	 * then cause corruption.
+ */
+ if ((refresh_rate >= 120 && refresh_rate <= 175 &&
+ stream->timing.v_addressable >= 1440 &&
+ stream->timing.v_addressable <= 2160) &&
+ (dc->current_state->stream_count > 1 ||
+ (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
+ return true;
+
+ return false;
+}
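/* Worked example with assumed timing values (illustration only): a
 * 2560x1440 mode with h_total = 2720, v_total = 1481 and
 * pix_clk_100hz = 6646728 (664.6728 MHz) gives
 *
 *   refresh_rate = (664672800 + 2720 * 1481 - 1) / 1481 / 2720
 *                = 668701119 / 1481 / 2720
 *                = 451519 / 2720
 *                = 165
 *
 * 165 Hz with v_addressable = 1440 lands inside the 120-175 Hz and
 * 1440-2160 line window above, so such a stream is treated as a SubVP high
 * refresh candidate provided the stream-count/freesync condition also holds.
 */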
+
/*
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -334,12 +359,13 @@ bool dc_stream_set_cursor_attributes(
/* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
* Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. For single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 2. For multi display cases, if resolution is >= 4K and refresh rate < 120hz
- *
- * [< 120hz is a requirement for SubVP configs]
+	 * 1. If the config is a candidate for SubVP high refresh (both single and dual display configs)
+ * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
+ * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
*/
if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) {
+ if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
+ return false;
if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return false;
@@ -396,8 +422,8 @@ static void program_cursor_position(
}
dc->hwss.set_cursor_position(pipe_ctx);
-
- dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
+ if (dc->ctx->dmub_srv)
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
if (pipe_to_program)
@@ -490,25 +516,6 @@ bool dc_stream_add_writeback(struct dc *dc,
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
dwb->otg_inst = stream_status->primary_otg_inst;
}
- if (IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* enable writeback */
- if (dc->hwss.enable_writeback) {
- struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
-
- if (dwb->funcs->is_enabled(dwb)) {
- /* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, wb_info, dc->current_state);
- } else {
- /* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
- }
- }
- }
return true;
}
@@ -553,17 +560,6 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
stream->num_wb_info = j;
- if (IS_DIAG_DC(dc->ctx->dce_environment)) {
- /* recalculate and apply DML parameters */
- if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
- dm_error("DC: update_bandwidth failed!\n");
- return false;
- }
-
- /* disable writeback */
- if (dc->hwss.disable_writeback)
- dc->hwss.disable_writeback(dc, dwb_pipe_inst);
- }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 30f0ba05a6e6..0d0bef8eb331 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-14 Advanced Micro Devices, Inc.
+ * Copyright 2012-2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -40,12 +40,14 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
+struct abm_save_restore;
+
/* forward declaration */
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.230"
+#define DC_VER "3.2.247"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -60,7 +62,9 @@ struct dc_versions {
};
enum dp_protocol_version {
- DP_VERSION_1_4,
+ DP_VERSION_1_4 = 0,
+ DP_VERSION_2_1,
+ DP_VERSION_UNKNOWN,
};
enum dc_plane_type {
@@ -209,6 +213,8 @@ struct dc_color_caps {
struct dc_dmub_caps {
bool psr;
bool mclk_sw;
+ bool subvp_psr;
+ bool gecc_enable;
};
struct dc_caps {
@@ -262,6 +268,7 @@ struct dc_caps {
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
+ uint32_t max_v_total;
uint8_t subvp_drr_vblank_start_margin_us;
};
@@ -270,8 +277,13 @@ struct dc_bug_wa {
bool dedcn20_305_wa;
bool skip_clock_update;
bool lt_early_cr_pattern;
+ struct {
+ uint8_t uclk : 1;
+ uint8_t fclk : 1;
+ uint8_t dcfclk : 1;
+ uint8_t dcfclk_ds: 1;
+ } clock_update_disable_mask;
};
-
struct dc_dcc_surface_param {
struct dc_size surface_size;
enum surface_pixel_format format;
@@ -406,7 +418,7 @@ struct dc_config {
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
bool use_old_fixed_vs_sequence;
- bool disable_subvp_drr;
+ bool dc_mode_clk_limit_support;
};
enum visual_confirm {
@@ -418,7 +430,9 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
+ VISUAL_CONFIRM_MCLK_SWITCH = 16,
};
enum dc_psr_power_opts {
@@ -495,7 +509,7 @@ enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_DISALLOW,
};
-/**
+/*
* struct dc_clocks - DC pipe clocks
*
* For any clocks that may differ per pipe only the max is stored in this
@@ -698,6 +712,8 @@ struct dc_virtual_addr_space_config {
struct dc_bounding_box_overrides {
int sr_exit_time_ns;
int sr_enter_plus_exit_time_ns;
+ int sr_exit_z8_time_ns;
+ int sr_enter_plus_exit_z8_time_ns;
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
@@ -715,7 +731,7 @@ struct resource_pool;
struct dce_hwseq;
struct link_service;
-/**
+/*
* struct dc_debug_options - DC debug struct
*
* This struct provides a simple mechanism for developers to change some
@@ -743,7 +759,7 @@ struct dc_debug_options {
bool use_max_lb;
enum dcc_option disable_dcc;
- /**
+ /*
* @pipe_split_policy: Define which pipe split policy is used by the
* display core.
*/
@@ -767,6 +783,8 @@ struct dc_debug_options {
int sr_enter_plus_exit_time_dpm0_ns;
int sr_exit_time_ns;
int sr_enter_plus_exit_time_ns;
+ int sr_exit_z8_time_ns;
+ int sr_enter_plus_exit_z8_time_ns;
int urgent_latency_ns;
uint32_t underflow_assert_delay_us;
int percent_of_ideal_drambw;
@@ -835,6 +853,7 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
+ bool disable_fams_gaming;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -845,6 +864,7 @@ struct dc_debug_options {
bool psr_skip_crtc_disable;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
+ uint32_t fixed_vs_aux_delay_config_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
bool allow_sw_cursor_fallback;
@@ -855,7 +875,6 @@ struct dc_debug_options {
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
- uint32_t fixed_vs_aux_delay_config_wa;
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
uint32_t mst_start_top_delay;
@@ -879,9 +898,26 @@ struct dc_debug_options {
uint32_t fpo_vactive_margin_us;
bool disable_fpo_vactive;
bool disable_boot_optimizations;
+ bool override_odm_optimization;
+ bool minimize_dispclk_using_odm;
+ bool disable_subvp_high_refresh;
+ bool disable_dp_plus_plus_wa;
+ uint32_t fpo_vactive_min_active_margin_us;
+ uint32_t fpo_vactive_max_blank_us;
+ bool enable_legacy_fast_update;
+ bool disable_dc_mode_overwrite;
+ bool replay_skip_crtc_disabled;
};
struct gpu_info_soc_bounding_box_v1_0;
+
+/* Generic structure that can be used to query properties of DC. More fields
+ * can be added as required.
+ */
+struct dc_current_properties {
+ unsigned int cursor_size_limit;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
@@ -1242,6 +1278,16 @@ struct dc_scaling_info {
struct scaling_taps scaling_quality;
};
+struct dc_fast_update {
+ const struct dc_flip_addrs *flip_addr;
+ const struct dc_gamma *gamma;
+ const struct colorspace_transform *gamut_remap_matrix;
+ const struct dc_csc_transform *input_csc_color_matrix;
+ const struct fixed31_32 *coeff_reduction_factor;
+ struct dc_transfer_func *out_transfer_func;
+ struct dc_csc_transform *output_csc_transform;
+};
+
struct dc_surface_update {
struct dc_plane_state *surface;
@@ -1301,7 +1347,7 @@ struct dc_validation_set {
struct dc_stream_state *stream;
/**
- * @plane_state: Surface state
+ * @plane_states: Surface state
*/
struct dc_plane_state *plane_states[MAX_SURFACES];
@@ -1376,10 +1422,14 @@ struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+void dc_set_disable_128b_132b_stream_overhead(bool disable);
+
/* The function returns minimum bandwidth required to drive a given timing
* return - minimum required timing bandwidth in kbps.
*/
-uint32_t dc_bandwidth_in_kbps_from_timing(const struct dc_crtc_timing *timing);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
/*
@@ -1448,6 +1498,7 @@ struct dc_link {
enum engine_id eng_id;
bool test_pattern_enabled;
+ enum dp_test_pattern current_test_pattern;
union compliance_test_state compliance_test_state;
void *priv;
@@ -1481,8 +1532,11 @@ struct dc_link {
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+ struct backlight_settings backlight_settings;
struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
/* Drive settings read from integrated info table */
struct dc_lane_settings bios_forced_drive_settings;
@@ -1502,6 +1556,8 @@ struct dc_link {
/* Forced DPIA into TBT3 compatibility mode. */
bool dpia_forced_tbt3_mode;
bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -1814,6 +1870,14 @@ enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
*/
const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
+/* Get the highest encoding format that the link supports; highest meaning the
+ * encoding format which supports the maximum bandwidth.
+ *
+ * @link - a link with DP RX connection
+ * return - highest encoding format link supports.
+ */
+enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link);
+
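/* Usage sketch, not part of the patch: one plausible way to combine the two
 * new interfaces, feeding the highest encoding format the link supports into
 * the extended dc_bandwidth_in_kbps_from_timing(). The helper name is an
 * assumption for the example only.
 */
static inline uint32_t example_required_bw_kbps(const struct dc_link *link,
		const struct dc_crtc_timing *timing)
{
	return dc_bandwidth_in_kbps_from_timing(timing,
			dc_link_get_highest_encoding_format(link));
}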
/* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected
* to a link with dp connector signal type.
* @link - a link with dp connector signal type
@@ -1948,6 +2012,8 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
+bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
+
/* On eDP links this function call will stall until T12 has elapsed.
* If the panel is not in power off state, this function will return
* immediately.
@@ -2126,8 +2192,6 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
-bool dc_extended_blank_supported(struct dc *dc);
-
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
@@ -2197,6 +2261,11 @@ void dc_z10_save_init(struct dc *dc);
bool dc_is_dmub_outbox_supported(struct dc *dc);
bool dc_enable_dmub_notifications(struct dc *dc);
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData);
+
void dc_enable_dmub_outbox(struct dc *dc);
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
@@ -2220,10 +2289,17 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable);
+void dc_print_dmub_diagnostic_data(const struct dc *dc);
+
+void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties);
+
/* DSC Interfaces */
#include "dc_dsc.h"
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
+bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream);
+
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index a9b9490a532c..4c5ef3ef8dbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -31,6 +31,7 @@
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
+#include "resource.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -65,47 +66,6 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- union dmub_rb_cmd *cmd)
-{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
- enum dmub_status status;
-
- status = dmub_srv_cmd_queue(dmub, cmd);
- if (status == DMUB_STATUS_OK)
- return;
-
- if (status != DMUB_STATUS_QUEUE_FULL)
- goto error;
-
- /* Execute and wait for queue to become empty again. */
- dc_dmub_srv_cmd_execute(dc_dmub_srv);
- dc_dmub_srv_wait_idle(dc_dmub_srv);
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, cmd);
- if (status == DMUB_STATUS_OK)
- return;
-
-error:
- DC_ERROR("Error queuing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
-}
-
-void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
-{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
- enum dmub_status status;
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
-}
-
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
@@ -159,50 +119,89 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
}
}
-bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
+bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
+}
+
+bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
+ struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
+ int i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
+ dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
- status = dmub_srv_cmd_with_reply_data(dmub, cmd);
+ for (i = 0 ; i < count; i++) {
+ // Queue command
+ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+
+ if (status == DMUB_STATUS_QUEUE_FULL) {
+ /* Execute and wait for queue to become empty again. */
+ dmub_srv_cmd_execute(dmub);
+ dmub_srv_wait_for_idle(dmub, 100000);
+
+ /* Requeue the command. */
+ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ return false;
+ }
+ }
+
+ status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
+ // Wait for DMUB to process command
+ if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+
+ if (status != DMUB_STATUS_OK) {
+ DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ return false;
+ }
+
+ // Copy data back from ring buffer into command
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ }
+
return true;
}
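/* Usage sketch, not part of the patch: running one DMUB command and reading
 * its reply back through the new helper. The header setup mirrors
 * dc_dmub_srv_query_caps_cmd() further down in this patch; the wrapper
 * function itself is hypothetical.
 */
static bool example_query_feature_caps(struct dc_dmub_srv *dc_dmub_srv,
		struct dmub_feature_caps *caps)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes =
			sizeof(struct dmub_cmd_query_feature_caps_data);

	/* WAIT_WITH_REPLY copies the processed entry back into cmd */
	if (!dc_dmub_srv_cmd_run(dc_dmub_srv, &cmd,
			DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		return false;

	if (cmd.query_feature_caps.header.ret_status != 0)
		return false;

	memcpy(caps, &cmd.query_feature_caps.query_feature_caps_data,
			sizeof(struct dmub_feature_caps));
	return true;
}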
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
+ union dmub_fw_boot_status boot_status;
enum dmub_status status;
- for (;;) {
- /* Wait up to a second for PHY init. */
- status = dmub_srv_wait_for_phy_init(dmub, 1000000);
- if (status == DMUB_STATUS_OK)
- /* Initialization OK */
- break;
-
- DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
- ASSERT(0);
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
- if (status != DMUB_STATUS_TIMEOUT)
- /*
- * Server likely initialized or we don't have
- * DMCUB HW support - this won't end.
- */
- break;
+ dmub = dc_dmub_srv->dmub;
+ dc_ctx = dc_dmub_srv->ctx;
- /* Continue spinning so we don't hang the ASIC. */
+ status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
+ return false;
}
+
+ return boot_status.bits.optimized_init_done;
}
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
@@ -267,9 +266,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
@@ -283,9 +280,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
@@ -362,7 +357,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->stream && pipe->stream->fpo_in_use) {
+ if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
@@ -378,21 +373,17 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
-void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
+void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
union dmub_rb_cmd cmd = { 0 };
- enum dmub_status status;
- if (!dmub) {
+ if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return;
- }
memset(&cmd, 0, sizeof(cmd));
@@ -402,15 +393,10 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
cmd.query_feature_caps.header.ret_status = 1;
cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
- /* Send command to fw */
- status = dmub_srv_cmd_with_reply_data(dmub, &cmd);
-
- ASSERT(status == DMUB_STATUS_OK);
-
/* If command was processed, copy feature caps to dmub srv */
- if (status == DMUB_STATUS_OK &&
+ if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.query_feature_caps.header.ret_status == 0) {
- memcpy(&dmub->feature_caps,
+ memcpy(&dc_dmub_srv->dmub->feature_caps,
&cmd.query_feature_caps.query_feature_caps_data,
sizeof(struct dmub_feature_caps));
}
@@ -419,7 +405,6 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
union dmub_rb_cmd cmd = { 0 };
- enum dmub_status status;
unsigned int panel_inst = 0;
dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
@@ -433,13 +418,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
- // Send command to fw
- status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd);
-
- ASSERT(status == DMUB_STATUS_OK);
-
// If command was processed, copy feature caps to dmub srv
- if (status == DMUB_STATUS_OK &&
+ if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.visual_confirm_color.header.ret_status == 0) {
memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
&cmd.visual_confirm_color.visual_confirm_color_data,
@@ -552,7 +532,8 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
- if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(pipe, DPP_PIPE))
continue;
// Find the SubVP pipe
@@ -749,12 +730,10 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
- continue;
-
/* For SubVP pipe count, only count the top most (ODM / MPC) pipe
*/
- if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -771,12 +750,14 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
* Any ODM or MPC splits being used in SubVP will be handled internally in
* populate_subvp_cmd_pipe_info
*/
- if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
- !pipe->top_pipe && !pipe->prev_odm_pipe &&
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->stream->mall_stream_config.paired_stream &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
- } else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE &&
- !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ } else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -797,9 +778,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
@@ -823,74 +803,40 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
return;
}
- DC_LOG_DEBUG(
- "DMCUB STATE\n"
- " dmcub_version : %08x\n"
- " scratch [0] : %08x\n"
- " scratch [1] : %08x\n"
- " scratch [2] : %08x\n"
- " scratch [3] : %08x\n"
- " scratch [4] : %08x\n"
- " scratch [5] : %08x\n"
- " scratch [6] : %08x\n"
- " scratch [7] : %08x\n"
- " scratch [8] : %08x\n"
- " scratch [9] : %08x\n"
- " scratch [10] : %08x\n"
- " scratch [11] : %08x\n"
- " scratch [12] : %08x\n"
- " scratch [13] : %08x\n"
- " scratch [14] : %08x\n"
- " scratch [15] : %08x\n"
- " pc : %08x\n"
- " unk_fault_addr : %08x\n"
- " inst_fault_addr : %08x\n"
- " data_fault_addr : %08x\n"
- " inbox1_rptr : %08x\n"
- " inbox1_wptr : %08x\n"
- " inbox1_size : %08x\n"
- " inbox0_rptr : %08x\n"
- " inbox0_wptr : %08x\n"
- " inbox0_size : %08x\n"
- " is_enabled : %d\n"
- " is_soft_reset : %d\n"
- " is_secure_reset : %d\n"
- " is_traceport_en : %d\n"
- " is_cw0_en : %d\n"
- " is_cw6_en : %d\n",
- diag_data.dmcub_version,
- diag_data.scratch[0],
- diag_data.scratch[1],
- diag_data.scratch[2],
- diag_data.scratch[3],
- diag_data.scratch[4],
- diag_data.scratch[5],
- diag_data.scratch[6],
- diag_data.scratch[7],
- diag_data.scratch[8],
- diag_data.scratch[9],
- diag_data.scratch[10],
- diag_data.scratch[11],
- diag_data.scratch[12],
- diag_data.scratch[13],
- diag_data.scratch[14],
- diag_data.scratch[15],
- diag_data.pc,
- diag_data.undefined_address_fault_addr,
- diag_data.inst_fetch_fault_addr,
- diag_data.data_write_fault_addr,
- diag_data.inbox1_rptr,
- diag_data.inbox1_wptr,
- diag_data.inbox1_size,
- diag_data.inbox0_rptr,
- diag_data.inbox0_wptr,
- diag_data.inbox0_size,
- diag_data.is_dmcub_enabled,
- diag_data.is_dmcub_soft_reset,
- diag_data.is_dmcub_secure_reset,
- diag_data.is_traceport_en,
- diag_data.is_cw0_enabled,
- diag_data.is_cw6_enabled);
+ DC_LOG_DEBUG("DMCUB STATE:");
+ DC_LOG_DEBUG(" dmcub_version : %08x", diag_data.dmcub_version);
+ DC_LOG_DEBUG(" scratch [0] : %08x", diag_data.scratch[0]);
+ DC_LOG_DEBUG(" scratch [1] : %08x", diag_data.scratch[1]);
+ DC_LOG_DEBUG(" scratch [2] : %08x", diag_data.scratch[2]);
+ DC_LOG_DEBUG(" scratch [3] : %08x", diag_data.scratch[3]);
+ DC_LOG_DEBUG(" scratch [4] : %08x", diag_data.scratch[4]);
+ DC_LOG_DEBUG(" scratch [5] : %08x", diag_data.scratch[5]);
+ DC_LOG_DEBUG(" scratch [6] : %08x", diag_data.scratch[6]);
+ DC_LOG_DEBUG(" scratch [7] : %08x", diag_data.scratch[7]);
+ DC_LOG_DEBUG(" scratch [8] : %08x", diag_data.scratch[8]);
+ DC_LOG_DEBUG(" scratch [9] : %08x", diag_data.scratch[9]);
+ DC_LOG_DEBUG(" scratch [10] : %08x", diag_data.scratch[10]);
+ DC_LOG_DEBUG(" scratch [11] : %08x", diag_data.scratch[11]);
+ DC_LOG_DEBUG(" scratch [12] : %08x", diag_data.scratch[12]);
+ DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
+ DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
+ DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
+ DC_LOG_DEBUG(" pc : %08x", diag_data.pc);
+ DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
+ DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
+ DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
+ DC_LOG_DEBUG(" inbox1_rptr : %08x", diag_data.inbox1_rptr);
+ DC_LOG_DEBUG(" inbox1_wptr : %08x", diag_data.inbox1_wptr);
+ DC_LOG_DEBUG(" inbox1_size : %08x", diag_data.inbox1_size);
+ DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
+ DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
+ DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
+ DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
+ DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
+ DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
+ DC_LOG_DEBUG(" is_traceport_en : %d", diag_data.is_traceport_en);
+ DC_LOG_DEBUG(" is_cw0_en : %d", diag_data.is_cw0_enabled);
+ DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled);
}
static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
@@ -953,6 +899,9 @@ static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
+ if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
+ return true;
+
return false;
}
@@ -982,14 +931,6 @@ static void dc_build_cursor_update_payload0(
payload->panel_inst = panel_inst;
}
-static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
- union dmub_rb_cmd *cmd)
-{
- dc_dmub_srv_cmd_queue(dmub_srv, cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
-}
-
static void dc_build_cursor_position_update_payload0(
struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
const struct hubp *hubp, const struct dpp *dpp)
@@ -1032,9 +973,11 @@ static void dc_build_cursor_attribute_update_payload1(
void dc_send_update_cursor_info_to_dmu(
struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
- union dmub_rb_cmd cmd = { 0 };
- union dmub_cmd_update_cursor_info_data *update_cursor_info =
- &cmd.update_cursor_info.update_cursor_info_data;
+ union dmub_rb_cmd cmd[2];
+ union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
+ &cmd[0].update_cursor_info.update_cursor_info_data;
+
+ memset(cmd, 0, sizeof(cmd));
if (!dc_dmub_should_update_cursor_data(pCtx))
return;
@@ -1051,31 +994,64 @@ void dc_send_update_cursor_info_to_dmu(
{
/* Build Payload#0 Header */
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */
+ cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd[0].update_cursor_info.header.payload_bytes =
+ sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
+ cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd
/* Prepare Payload */
- dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);
- dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
pCtx->plane_res.hubp, pCtx->plane_res.dpp);
- /* Send update_curosr_info to queue */
- dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
- }
+ }
{
/* Build Payload#1 Header */
- memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
- cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+ cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command.
dc_build_cursor_attribute_update_payload1(
- &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
		/* Combine 2nd cmds update_cursor_info to DMU */
- dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
}
+
+bool dc_dmub_check_min_version(struct dmub_srv *srv)
+{
+ if (!srv->hw_funcs.is_psrsu_supported)
+ return true;
+ return srv->hw_funcs.is_psrsu_supported(srv);
+}
+
+void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ struct dmub_srv *dmub;
+ enum dmub_status status;
+ static const uint32_t timeout_us = 30;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
+ DC_LOG_ERROR("%s: invalid parameters.", __func__);
+ return;
+ }
+
+ dmub = dc_dmub_srv->dmub;
+
+ status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
+ if (status != DMUB_STATUS_OK) {
+ DC_LOG_ERROR("timeout updating trace buffer mask word\n");
+ return;
+ }
+
+ status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
+ if (status != DMUB_STATUS_OK) {
+ DC_LOG_ERROR("timeout updating trace buffer mask word\n");
+ return;
+ }
+
+ DC_LOG_DEBUG("Enabled DPIA trace\n");
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index d34f5563df2e..bb3fe162dd93 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -26,7 +26,7 @@
#ifndef _DMUB_DC_SRV_H_
#define _DMUB_DC_SRV_H_
-#include "os_types.h"
+#include "dm_services_types.h"
#include "dmub/dmub_srv.h"
struct dmub_srv;
@@ -52,16 +52,13 @@ struct dc_dmub_srv {
void *dm;
};
-void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- union dmub_rb_cmd *cmd);
-
-void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
-
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
+
+bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
-bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd);
+bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type);
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
unsigned int stream_mask);
@@ -77,7 +74,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst);
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);
-void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);
+void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
@@ -89,4 +86,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
+bool dc_dmub_check_min_version(struct dmub_srv *srv);
+
+void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
+
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 49aab1924665..cfaa39c5dd16 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -61,7 +61,7 @@ enum dc_link_rate {
*/
LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane
LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane
- LINK_RATE_UHBR20 = 2000, // UHBR10 - 20.0 Gbps/Lane
+ LINK_RATE_UHBR20 = 2000, // UHBR20 - 20.0 Gbps/Lane
};
enum dc_link_spread {
@@ -566,6 +566,12 @@ struct dpcd_amd_device_id {
uint8_t dal_version_byte2;
};
+struct target_luminance_value {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+};
+
struct dpcd_source_backlight_set {
struct {
uint8_t byte0;
@@ -1111,6 +1117,11 @@ struct edp_psr_info {
uint8_t force_psrsu_cap;
};
+struct replay_info {
+ uint8_t pixel_deviation_per_line;
+ uint8_t max_deviation_line;
+};
+
struct dprx_states {
bool cable_id_written;
};
@@ -1225,10 +1236,13 @@ struct dpcd_caps {
union dp_main_line_channel_coding_cap channel_coding_cap;
union dp_sink_video_fallback_formats fallback_formats;
union dp_fec_capability1 fec_cap1;
+ bool panel_luminance_control;
union dp_cable_id cable_id;
uint8_t edp_rev;
union edp_alpm_caps alpm_caps;
struct edp_psr_info psr_info;
+
+ struct replay_info pr_info;
};
union dpcd_sink_ext_caps {
@@ -1269,6 +1283,28 @@ union dpcd_psr_configuration {
unsigned char raw;
};
+union replay_enable_and_configuration {
+ struct {
+ unsigned char FREESYNC_PANEL_REPLAY_MODE :1;
+ unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1;
+ unsigned char STATE_TRANSITION_ERROR_DETECTION :1;
+ unsigned char RESERVED0 :1;
+ unsigned char RESERVED1 :4;
+ } bits;
+ unsigned char raw;
+};
+
+union dpcd_replay_configuration {
+ struct {
+ unsigned char STATE_TRANSITION_ERROR_STATUS : 1;
+ unsigned char DESYNC_ERROR_STATUS : 1;
+ unsigned char SINK_DEVICE_REPLAY_STATUS : 3;
+ unsigned char SINK_FRAME_LOCKED : 2;
+ unsigned char RESERVED : 1;
+ } bits;
+ unsigned char raw;
+};
+
union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 0e92a322c2ed..fe3078b8789e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -58,6 +58,7 @@ struct dc_dsc_config_options {
uint32_t dsc_min_slice_height_override;
uint32_t max_target_bpp_limit_override_x16;
uint32_t slice_height_granularity;
+ uint32_t dsc_force_odm_hslice_override;
};
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
@@ -72,6 +73,7 @@ bool dc_dsc_compute_bandwidth_range(
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
@@ -80,6 +82,7 @@ bool dc_dsc_compute_config(
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index f43cce16bb6c..3907eeff560c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -41,19 +41,13 @@ static inline void submit_dmub_read_modify_write(
const struct dc_context *ctx)
{
struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
- bool gather = false;
offload->should_burst_write =
(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
cmd_buf->header.payload_bytes =
sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
- gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
-
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
-
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+ dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
@@ -66,17 +60,11 @@ static inline void submit_dmub_burst_write(
const struct dc_context *ctx)
{
struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
- bool gather = false;
cmd_buf->header.payload_bytes =
sizeof(uint32_t) * offload->reg_seq_count;
- gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
-
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
-
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+ dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
@@ -88,17 +76,11 @@ static inline void submit_dmub_reg_wait(
const struct dc_context *ctx)
{
struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
- bool gather = false;
-
- gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
+ dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
-
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
struct dc_reg_value_masks {
@@ -151,7 +133,6 @@ static void dmub_flush_buffer_execute(
const struct dc_context *ctx)
{
submit_dmub_read_modify_write(offload, ctx);
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
static void dmub_flush_burst_write_buffer_execute(
@@ -159,7 +140,6 @@ static void dmub_flush_burst_write_buffer_execute(
const struct dc_context *ctx)
{
submit_dmub_burst_write(offload, ctx);
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
@@ -484,8 +464,7 @@ void generic_reg_wait(const struct dc_context *ctx,
field_value = get_reg_field_value_ex(reg_val, mask, shift);
if (field_value == condition_value) {
- if (i * delay_between_poll_us > 1000 &&
- !IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ if (i * delay_between_poll_us > 1000)
DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
@@ -497,8 +476,7 @@ void generic_reg_wait(const struct dc_context *ctx,
delay_between_poll_us, time_out_num_tries,
func_name, line);
- if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- BREAK_TO_DEBUGGER();
+ BREAK_TO_DEBUGGER();
}
void generic_write_indirect_reg(const struct dc_context *ctx,
@@ -691,8 +669,6 @@ void reg_sequence_start_execute(const struct dc_context *ctx)
default:
return;
}
-
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
}
@@ -712,3 +688,59 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
dc_dmub_srv_wait_idle(ctx->dmub_srv);
}
}
+
+char *dce_version_to_string(const int version)
+{
+ switch (version) {
+ case DCE_VERSION_8_0:
+ return "DCE 8.0";
+ case DCE_VERSION_8_1:
+ return "DCE 8.1";
+ case DCE_VERSION_8_3:
+ return "DCE 8.3";
+ case DCE_VERSION_10_0:
+ return "DCE 10.0";
+ case DCE_VERSION_11_0:
+ return "DCE 11.0";
+ case DCE_VERSION_11_2:
+ return "DCE 11.2";
+ case DCE_VERSION_11_22:
+ return "DCE 11.22";
+ case DCE_VERSION_12_0:
+ return "DCE 12.0";
+ case DCE_VERSION_12_1:
+ return "DCE 12.1";
+ case DCN_VERSION_1_0:
+ return "DCN 1.0";
+ case DCN_VERSION_1_01:
+ return "DCN 1.0.1";
+ case DCN_VERSION_2_0:
+ return "DCN 2.0";
+ case DCN_VERSION_2_1:
+ return "DCN 2.1";
+ case DCN_VERSION_2_01:
+ return "DCN 2.0.1";
+ case DCN_VERSION_3_0:
+ return "DCN 3.0";
+ case DCN_VERSION_3_01:
+ return "DCN 3.0.1";
+ case DCN_VERSION_3_02:
+ return "DCN 3.0.2";
+ case DCN_VERSION_3_03:
+ return "DCN 3.0.3";
+ case DCN_VERSION_3_1:
+ return "DCN 3.1";
+ case DCN_VERSION_3_14:
+ return "DCN 3.1.4";
+ case DCN_VERSION_3_15:
+ return "DCN 3.1.5";
+ case DCN_VERSION_3_16:
+ return "DCN 3.1.6";
+ case DCN_VERSION_3_2:
+ return "DCN 3.2";
+ case DCN_VERSION_3_21:
+ return "DCN 3.2.1";
+ default:
+ return "Unknown";
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 25284006019c..3697ea1d14c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -131,6 +131,7 @@ union stream_update_flags {
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
+ uint32_t fams_changed : 1;
} bits;
uint32_t raw;
@@ -171,6 +172,10 @@ struct mall_temp_config {
bool is_phantom_plane[MAX_PIPES];
};
+struct dc_stream_debug_options {
+ char force_odm_combine_segments;
+};
+
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
@@ -181,6 +186,7 @@ struct dc_stream_state {
* a stream via the volatile dc_state rather than the static dc_link.
*/
struct link_encoder *link_enc;
+ struct dc_stream_debug_options debug;
struct dc_panel_patch sink_patches;
union display_content_support content_support;
struct dc_crtc_timing timing;
@@ -227,6 +233,7 @@ struct dc_stream_state {
*/
bool vrr_active_variable;
bool freesync_on_desktop;
+ bool vrr_active_fixed;
bool converter_disable_audio;
uint8_t qs_bit;
@@ -295,6 +302,7 @@ struct dc_stream_state {
bool vblank_synchronized;
bool fpo_in_use;
struct mall_stream_config mall_stream_config;
+ bool skip_edp_power_down;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -320,6 +328,7 @@ struct dc_stream_update {
bool integer_scaling_update;
bool *allow_freesync;
bool *vrr_active_variable;
+ bool *vrr_active_fixed;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 45ab48fe5d00..445ad79001ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -69,13 +69,6 @@ enum dce_environment {
DCE_ENV_VIRTUAL_HW
};
-/* Note: use these macro definitions instead of direct comparison! */
-#define IS_FPGA_MAXIMUS_DC(dce_environment) \
- (dce_environment == DCE_ENV_FPGA_MAXIMUS)
-
-#define IS_DIAG_DC(dce_environment) \
- (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
-
struct dc_perf_trace {
unsigned long read_count;
unsigned long write_count;
@@ -83,7 +76,7 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 4
+#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
@@ -603,6 +596,7 @@ enum dc_psr_state {
PSR_STATE4b_FULL_FRAME,
PSR_STATE4c_FULL_FRAME,
PSR_STATE4_FULL_FRAME_POWERUP,
+ PSR_STATE4_FULL_FRAME_HW_LOCK,
PSR_STATE5,
PSR_STATE5a,
PSR_STATE5b,
@@ -884,7 +878,7 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_overall_throughput_0_mps; /* In MPs */
uint32_t branch_overall_throughput_1_mps; /* In MPs */
uint32_t branch_max_line_width;
- bool is_dp;
+ bool is_dp; /* Decoded format */
};
struct dc_golden_table {
@@ -907,6 +901,14 @@ enum dc_gpu_mem_alloc_type {
DC_MEM_ALLOC_TYPE_AGP
};
+enum dc_link_encoding_format {
+ DC_LINK_ENCODING_UNSPECIFIED = 0,
+ DC_LINK_ENCODING_DP_8b_10b,
+ DC_LINK_ENCODING_DP_128b_132b,
+ DC_LINK_ENCODING_HDMI_TMDS,
+ DC_LINK_ENCODING_HDMI_FRL
+};
+
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
DC_PSR_VERSION_SU_1 = 1,
@@ -1000,6 +1002,10 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
+struct backlight_settings {
+ uint32_t backlight_millinits;
+};
+
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink
@@ -1019,6 +1025,45 @@ struct psr_settings {
unsigned int psr_power_opt;
};
+enum replay_coasting_vtotal_type {
+ PR_COASTING_TYPE_NOM = 0,
+ PR_COASTING_TYPE_STATIC,
+ PR_COASTING_TYPE_FULL_SCREEN_VIDEO,
+ PR_COASTING_TYPE_TEST_HARNESS,
+ PR_COASTING_TYPE_NUM,
+};
+
+union replay_error_status {
+ struct {
+ unsigned char STATE_TRANSITION_ERROR :1;
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char DESYNC_ERROR :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct replay_config {
+ bool replay_supported; // Replay feature is supported
+ unsigned int replay_power_opt_supported; // Power opt flags that are supported
+ bool replay_smu_opt_supported; // SMU optimization is supported
+ unsigned int replay_enable_option; // Replay enablement option
+ uint32_t debug_flags; // Replay debug flags
+ bool replay_timing_sync_supported; // Replay desync is supported
+ union replay_error_status replay_error_status; // Replay error status
+};
+
+/* Replay feature flags */
+struct replay_settings {
+ struct replay_config config; // Replay configuration
+ bool replay_feature_enabled; // Replay feature is ready for activating
+ bool replay_allow_active; // Replay is currently active
+ unsigned int replay_power_opt_active; // Power opt flags that are activated currently
+ bool replay_smu_opt_enable; // SMU optimization is enabled
+ uint16_t coasting_vtotal; // Current Coasting vtotal
+ uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
+};
+
/* To split out "global" and "per-panel" config settings.
* Add a struct dc_panel_config under dc_link
*/
@@ -1045,9 +1090,11 @@ struct dc_panel_config {
struct psr {
bool disable_psr;
bool disallow_psrsu;
+ bool disallow_replay;
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ unsigned int replay_enable_option;
} psr;
/* ABM */
struct varib {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 0d7db132a20f..15b64c26d5a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,8 +29,8 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \
-dmub_hw_lock_mgr.o dmub_outbox.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dmub_abm_lcd.o dce_panel_cntl.o \
+dmub_hw_lock_mgr.o dmub_outbox.o dmub_replay.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index e6c06325742a..168cb7094c95 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -266,7 +266,24 @@
type MASTER_COMM_INTERRUPT; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_CMD_REG_BYTE1; \
- type MASTER_COMM_CMD_REG_BYTE2
+ type MASTER_COMM_CMD_REG_BYTE2; \
+ type ABM1_HG_BIN_33_40_SHIFT_INDEX; \
+ type ABM1_HG_BIN_33_64_SHIFT_FLAG; \
+ type ABM1_HG_BIN_41_48_SHIFT_INDEX; \
+ type ABM1_HG_BIN_49_56_SHIFT_INDEX; \
+ type ABM1_HG_BIN_57_64_SHIFT_INDEX; \
+ type ABM1_HG_RESULT_DATA; \
+ type ABM1_HG_RESULT_INDEX; \
+ type ABM1_ACE_SLOPE_DATA; \
+ type ABM1_ACE_OFFSET_DATA; \
+ type ABM1_ACE_OFFSET_SLOPE_INDEX; \
+ type ABM1_ACE_THRES_INDEX; \
+ type ABM1_ACE_IGNORE_MASTER_LOCK_EN; \
+ type ABM1_ACE_READBACK_DB_REG_VALUE_EN; \
+ type ABM1_ACE_DBUF_REG_UPDATE_PENDING; \
+ type ABM1_ACE_LOCK; \
+ type ABM1_ACE_THRES_DATA_1; \
+ type ABM1_ACE_THRES_DATA_2
struct dce_abm_shift {
ABM_REG_FIELD_LIST(uint8_t);
@@ -288,6 +305,16 @@ struct dce_abm_registers {
uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
uint32_t DC_ABM1_ACE_OFFSET_SLOPE_0;
+ uint32_t DC_ABM1_ACE_OFFSET_SLOPE_DATA;
+ uint32_t DC_ABM1_ACE_PWL_CNTL;
+ uint32_t DC_ABM1_HG_BIN_33_40_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_BIN_33_64_SHIFT_FLAG;
+ uint32_t DC_ABM1_HG_BIN_41_48_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_BIN_49_56_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_BIN_57_64_SHIFT_INDEX;
+ uint32_t DC_ABM1_HG_RESULT_DATA;
+ uint32_t DC_ABM1_HG_RESULT_INDEX;
+ uint32_t DC_ABM1_ACE_THRES_DATA;
uint32_t DC_ABM1_ACE_THRES_12;
uint32_t MASTER_COMM_CNTL_REG;
uint32_t MASTER_COMM_CMD_REG;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 07359eb89efc..e7acd6eec1fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -640,7 +640,7 @@ static void dce11_pplib_apply_display_requirements(
* on power saving.
*
*/
- pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
+ pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 462c7a3ec3cc..ed8936405dfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -920,25 +920,6 @@ static bool dce112_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
- if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
- unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
- unsigned dp_dto_ref_100hz = 7000000;
- unsigned clock_100hz = pll_settings->actual_pix_clk_100hz;
-
- /* Set DTO values: phase = target clock, modulo = reference clock */
- REG_WRITE(PHASE[inst], clock_100hz);
- REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
-
- /* Enable DTO */
- if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
- REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
- DP_DTO0_ENABLE, 1,
- PIPE0_DTO_SRC_SEL, 1);
- else
- REG_UPDATE(PIXEL_RATE_CNTL[inst],
- DP_DTO0_ENABLE, 1);
- return true;
- }
/* First disable SS
* ATOMBIOS will enable by default SS on PLL for DP,
* do not disable it here
@@ -1015,25 +996,6 @@ static bool dcn31_program_pix_clk(
REG_UPDATE(PIXEL_RATE_CNTL[inst],
DP_DTO0_ENABLE, 1);
} else {
- if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
- unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
- unsigned dp_dto_ref_100hz = 7000000;
- unsigned clock_100hz = pll_settings->actual_pix_clk_100hz;
-
- /* Set DTO values: phase = target clock, modulo = reference clock */
- REG_WRITE(PHASE[inst], clock_100hz);
- REG_WRITE(MODULO[inst], dp_dto_ref_100hz);
-
- /* Enable DTO */
- if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
- REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
- DP_DTO0_ENABLE, 1,
- PIPE0_DTO_SRC_SEL, 1);
- else
- REG_UPDATE(PIXEL_RATE_CNTL[inst],
- DP_DTO0_ENABLE, 1);
- return true;
- }
if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
REG_UPDATE(PIXEL_RATE_CNTL[inst],
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index e74266cc0098..b87bfecb7755 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -76,9 +76,9 @@ static bool dce_dmcu_init(struct dmcu *dmcu)
}
static bool dce_dmcu_load_iram(struct dmcu *dmcu,
- unsigned int start_offset,
- const char *src,
- unsigned int bytes)
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
unsigned int count = 0;
@@ -1093,11 +1093,9 @@ static void dcn21_dmcu_construct(
dce_dmcu_construct(dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
- if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
- psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58);
- dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029);
- dmcu_dce->base.psp_version = psp_version;
- }
+ psp_version = dm_read_reg(ctx, mmMP0_SMN_C2PMSG_58);
+ dmcu_dce->base.auto_load_dmcu = ((psp_version & 0x00FF00FF) > 0x00110029);
+ dmcu_dce->base.psp_version = psp_version;
}
struct dmcu *dce_dmcu_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index a3fee929cd12..86233f94db4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -98,6 +98,29 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4)
+
+#define HWSEQ_PHYPLL_REG_LIST_302(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4)
+
+#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1)
+
+#define HWSEQ_PHYPLL_REG_LIST_303(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
+
+
#define HWSEQ_PHYPLL_REG_LIST_201(blk) \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
@@ -387,7 +410,11 @@
SR(MPC_CRC_RESULT_C), \
SR(MPC_CRC_RESULT_AR), \
SR(AZALIA_AUDIO_DTO), \
- SR(AZALIA_CONTROLLER_CLOCK_GATING)
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_CLOCK_CONTROL), \
+ SR(ODM_MEM_PWR_CTRL3), \
+ SR(DMU_MEM_PWR_CNTL), \
+ SR(MMHUBBUB_MEM_PWR_CNTL)
#define HWSEQ_DCN301_REG_LIST()\
SR(REFCLK_CNTL), \
@@ -508,8 +535,11 @@
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
+ HWSEQ_PIXEL_RATE_REG_LIST_302(OTG), \
+ HWSEQ_PHYPLL_REG_LIST_302(OTG), \
SR(AZALIA_AUDIO_DTO), \
- SR(AZALIA_CONTROLLER_CLOCK_GATING)
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_CLOCK_CONTROL)
#define HWSEQ_DCN303_REG_LIST() \
HWSEQ_DCN_REG_LIST(), \
@@ -540,28 +570,6 @@
SR(AZALIA_CONTROLLER_CLOCK_GATING), \
SR(HPO_TOP_CLOCK_CONTROL)
-#define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \
- SRII(PIXEL_RATE_CNTL, blk, 0), \
- SRII(PIXEL_RATE_CNTL, blk, 1),\
- SRII(PIXEL_RATE_CNTL, blk, 2),\
- SRII(PIXEL_RATE_CNTL, blk, 3), \
- SRII(PIXEL_RATE_CNTL, blk, 4)
-
-#define HWSEQ_PHYPLL_REG_LIST_302(blk) \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4)
-
-#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \
- SRII(PIXEL_RATE_CNTL, blk, 0), \
- SRII(PIXEL_RATE_CNTL, blk, 1)
-
-#define HWSEQ_PHYPLL_REG_LIST_303(blk) \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
- SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
-
struct dce_hwseq_registers {
uint32_t DCFE_CLOCK_CONTROL[6];
uint32_t DCFEV_CLOCK_CONTROL;
@@ -663,14 +671,15 @@ struct dce_hwseq_registers {
uint32_t MC_VM_XGMI_LFB_CNTL;
uint32_t AZALIA_AUDIO_DTO;
uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
+ /* MMHUB VM */
+ uint32_t MC_VM_FB_LOCATION_BASE;
+ uint32_t MC_VM_FB_LOCATION_TOP;
+ uint32_t MC_VM_FB_OFFSET;
+ uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t HPO_TOP_CLOCK_CONTROL;
uint32_t ODM_MEM_PWR_CTRL3;
uint32_t DMU_MEM_PWR_CNTL;
- uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
- uint32_t MC_VM_FB_LOCATION_BASE;
- uint32_t MC_VM_FB_LOCATION_TOP;
- uint32_t MC_VM_FB_OFFSET;
uint32_t HPO_TOP_HW_CONTROL;
};
/* set field name */
@@ -915,6 +924,7 @@ struct dce_hwseq_registers {
#define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
@@ -1012,7 +1022,8 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
+ HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh)
#define HWSEQ_DCN303_MASK_SH_LIST(mask_sh) \
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 6d1b01c267b7..4f552c3e7663 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -442,10 +442,9 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
return dce_i2c_hw;
}
-static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(
- struct dce_i2c_hw *dce_i2c_hw,
- uint32_t timeout,
- enum i2c_channel_operation_result expected_result)
+static enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result(struct dce_i2c_hw *dce_i2c_hw,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result)
{
enum i2c_channel_operation_result result;
uint32_t i = 0;
@@ -509,11 +508,10 @@ static uint32_t get_transaction_timeout_hw(
return period_timeout * num_of_clock_stretches;
}
-static bool dce_i2c_hw_engine_submit_payload(
- struct dce_i2c_hw *dce_i2c_hw,
- struct i2c_payload *payload,
- bool middle_of_transaction,
- uint32_t speed)
+static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
+ struct i2c_payload *payload,
+ bool middle_of_transaction,
+ uint32_t speed)
{
struct i2c_request_transaction_data request;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index f1aeb6d1967c..e188447c8156 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -367,6 +367,7 @@ static bool dce_i2c_sw_engine_acquire_engine(
return true;
}
+
bool dce_i2c_engine_acquire_sw(
struct dce_i2c_sw *dce_i2c_sw,
struct ddc *ddc_handle)
@@ -392,12 +393,8 @@ bool dce_i2c_engine_acquire_sw(
return result;
}
-
-
-
-static void dce_i2c_sw_engine_submit_channel_request(
- struct dce_i2c_sw *engine,
- struct i2c_request_transaction_data *req)
+static void dce_i2c_sw_engine_submit_channel_request(struct dce_i2c_sw *engine,
+ struct i2c_request_transaction_data *req)
{
struct ddc *ddc = engine->ddc;
uint16_t clock_delay_div_4 = engine->clock_delay >> 2;
@@ -439,10 +436,9 @@ static void dce_i2c_sw_engine_submit_channel_request(
I2C_CHANNEL_OPERATION_FAILED;
}
-static bool dce_i2c_sw_engine_submit_payload(
- struct dce_i2c_sw *engine,
- struct i2c_payload *payload,
- bool middle_of_transaction)
+static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
+ struct i2c_payload *payload,
+ bool middle_of_transaction)
{
struct i2c_request_transaction_data request;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index fa314493ffc5..136bd93c3b65 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -941,9 +941,7 @@ bool dce110_link_encoder_validate_output_with_stream(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
- is_valid =
- (stream->timing.
- pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
@@ -1645,7 +1643,7 @@ void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
uint32_t hpd_enable = 0;
uint32_t value = dm_read_reg(ctx, addr);
- get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN);
+ hpd_enable = get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN);
if (hpd_enable == 0)
set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 9fc48208c2e4..d3e6544022b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -24,212 +24,167 @@
*/
#include "dmub_abm.h"
-#include "dce_abm.h"
+#include "dmub_abm_lcd.h"
#include "dc.h"
-#include "dc_dmub_srv.h"
-#include "dmub/dmub_srv.h"
#include "core_types.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-
-#include "atom.h"
+#include "dmub_cmd.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
-#define REG(reg) \
- (dce_abm->regs->reg)
+#define ABM_FEATURE_NO_SUPPORT 0
+#define ABM_LCD_SUPPORT 1
-#undef FN
-#define FN(reg_name, field_name) \
- dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst)
+{
+ struct dc_context *dc = abm->ctx;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int i;
+ int edp_num;
+ unsigned int ret = ABM_FEATURE_NO_SUPPORT;
-#define CTX \
- dce_abm->base.ctx
+ dc_get_edp_links(dc->dc, edp_links, &edp_num);
-#define DISABLE_ABM_IMMEDIATELY 255
+ for (i = 0; i < edp_num; i++) {
+ if (panel_inst == i)
+ break;
+ }
+ if (i < edp_num) {
+ ret = ABM_LCD_SUPPORT;
+ }
+ return ret;
+}
-static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
{
- union dmub_rb_cmd cmd;
- uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
- uint32_t edp_id_count = dc->dc_edp_id_count;
- int i;
- uint8_t panel_mask = 0;
-
- for (i = 0; i < edp_id_count; i++)
- panel_mask |= 0x01 << i;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
- cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
- cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
- cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
- cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
-
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dmub_abm_init(abm, backlight);
}
-static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
-
- REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3);
- REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1);
- REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3);
- REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1);
- REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1);
-
- REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
- ABM1_HG_NUM_OF_BINS_SEL, 0,
- ABM1_HG_VMAX_SEL, 1,
- ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
-
- REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
- ABM1_IPCSC_COEFF_SEL_R, 2,
- ABM1_IPCSC_COEFF_SEL_G, 4,
- ABM1_IPCSC_COEFF_SEL_B, 2);
+ return dmub_abm_get_current_backlight(abm);
+}
- REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
- BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm)
+{
+ return dmub_abm_get_target_backlight(abm);
+}
- REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
- BL1_PWM_TARGET_ABM_LEVEL, backlight);
+static bool dmub_abm_set_level_ex(struct abm *abm, uint32_t level)
+{
+ bool ret = false;
+ unsigned int feature_support, i;
+ uint8_t panel_mask0 = 0;
- REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ for (i = 0; i < MAX_NUM_EDP; i++) {
+ feature_support = abm_feature_support(abm, i);
- REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
- ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
- ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+ if (feature_support == ABM_LCD_SUPPORT)
+ panel_mask0 |= (0x01 << i);
+ }
- REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
- ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
- ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
- ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+ if (panel_mask0)
+ ret = dmub_abm_set_level(abm, level, panel_mask0);
- dmub_abm_enable_fractional_pwm(abm->ctx);
+ return ret;
}
-static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+static bool dmub_abm_init_config_ex(struct abm *abm,
+ const char *src,
+ unsigned int bytes,
+ unsigned int inst)
{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
- unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+ unsigned int feature_support;
- /* return backlight in hardware format which is unsigned 17 bits, with
- * 1 bit integer and 16 bit fractional
- */
- return backlight;
-}
+ feature_support = abm_feature_support(abm, inst);
-static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
-{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
- unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+ if (feature_support == ABM_LCD_SUPPORT)
+ dmub_abm_init_config(abm, src, bytes, inst);
- /* return backlight in hardware format which is unsigned 17 bits, with
- * 1 bit integer and 16 bit fractional
- */
- return backlight;
+ return true;
}
-static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- struct dc_link *edp_links[MAX_NUM_EDP];
- int i;
- int edp_num;
- uint8_t panel_mask = 0;
-
- dc_get_edp_links(dc->dc, edp_links, &edp_num);
-
- for (i = 0; i < edp_num; i++) {
- if (edp_links[i]->link_status.link_active)
- panel_mask |= (0x01 << i);
- }
+ bool ret = false;
+ unsigned int feature_support;
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_set_level.header.type = DMUB_CMD__ABM;
- cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
- cmd.abm_set_level.abm_set_level_data.level = level;
- cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
- cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+ feature_support = abm_feature_support(abm, panel_inst);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_set_pause(abm, pause, panel_inst, stream_inst);
- return true;
+ return ret;
}
-static bool dmub_abm_init_config(struct abm *abm,
- const char *src,
- unsigned int bytes,
- unsigned int inst)
+/*****************************************************************************
+ * dmub_abm_save_restore_ex() - calls dmub_abm_save_restore() to preserve DMUB's
+ * Varibright state for LCD panels only; OLED is TBD
+ * @abm: used to get the dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and abm parameters
+ *
+ ***************************************************************************/
+static bool dmub_abm_save_restore_ex(
+ struct abm *abm,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData)
{
- union dmub_rb_cmd cmd;
+ bool ret = false;
+ unsigned int feature_support;
struct dc_context *dc = abm->ctx;
- uint8_t panel_mask = 0x01 << inst;
- // TODO: Optimize by only reading back final 4 bytes
- dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+ feature_support = abm_feature_support(abm, panel_inst);
+
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_save_restore(dc, panel_inst, pData);
- // Copy iramtable into cw7
- memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+ return ret;
+}
- memset(&cmd, 0, sizeof(cmd));
- // Fw will copy from cw7 to fw_state
- cmd.abm_init_config.header.type = DMUB_CMD__ABM;
- cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
- cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
- cmd.abm_init_config.abm_init_config_data.bytes = bytes;
- cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask;
+static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+{
+ bool ret = false;
+ unsigned int feature_support;
- cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+ feature_support = abm_feature_support(abm, panel_inst);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst);
- return true;
+ return ret;
}
-static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
+static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int controller_id,
+ unsigned int panel_inst)
{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- uint8_t panel_mask = 0x01 << panel_inst;
+ bool ret = false;
+ unsigned int feature_support;
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_pause.header.type = DMUB_CMD__ABM;
- cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
- cmd.abm_pause.abm_pause_data.enable = pause;
- cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
- cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+ feature_support = abm_feature_support(abm, panel_inst);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_set_backlight_level(abm, backlight_pwm_u16_16, frame_ramp, panel_inst);
- return true;
+ return ret;
}
static const struct abm_funcs abm_funcs = {
- .abm_init = dmub_abm_init,
- .set_abm_level = dmub_abm_set_level,
- .get_current_backlight = dmub_abm_get_current_backlight,
- .get_target_backlight = dmub_abm_get_target_backlight,
- .init_abm_config = dmub_abm_init_config,
- .set_abm_pause = dmub_abm_set_pause,
+ .abm_init = dmub_abm_init_ex,
+ .set_abm_level = dmub_abm_set_level_ex,
+ .get_current_backlight = dmub_abm_get_current_backlight_ex,
+ .get_target_backlight = dmub_abm_get_target_backlight_ex,
+ .init_abm_config = dmub_abm_init_config_ex,
+ .set_abm_pause = dmub_abm_set_pause_ex,
+ .save_restore = dmub_abm_save_restore_ex,
+ .set_pipe_ex = dmub_abm_set_pipe_ex,
+ .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex,
};
static void dmub_abm_construct(
@@ -256,16 +211,19 @@ struct abm *dmub_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+ if (ctx->dc->caps.dmcub_support) {
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
- if (abm_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
- dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+ dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
- return &abm_dce->base;
+ return &abm_dce->base;
+ }
+ return NULL;
}
void dmub_abm_destroy(struct abm **abm)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
new file mode 100644
index 000000000000..592a8f7a1c6d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dmub_abm_lcd.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#ifdef _WIN32
+#include "atombios.h"
+#else
+#include "atom.h"
+#endif
+
+#define TO_DMUB_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+ dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+ uint32_t edp_id_count = dc->dc_edp_id_count;
+ int i;
+ uint8_t panel_mask = 0;
+
+ for (i = 0; i < edp_id_count; i++)
+ panel_mask |= 0x01 << i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
+ cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+ dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
+unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
+bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+ cmd.abm_set_level.abm_set_level_data.level = level;
+ cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+void dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes,
+ unsigned int inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint8_t panel_mask = 0x01 << inst;
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy iramtable into cw7
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+ memset(&cmd, 0, sizeof(cmd));
+ // Fw will copy from cw7 to fw_state
+ cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+ cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+ cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+ cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask;
+
+ cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+}
+
+bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint8_t panel_mask = 0x01 << panel_inst;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_pause.header.type = DMUB_CMD__ABM;
+ cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
+ cmd.abm_pause.abm_pause_data.enable = pause;
+ cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+
+/*****************************************************************************
+ * dmub_abm_save_restore() - dmub interface for abm save+pause and restore+
+ * un-pause
+ * @dc: dc context
+ * @panel_inst: panel instance index
+ * @pData: contains command to pause/un-pause abm and exchange abm parameters
+ *
+ * On pause, the current ABM data is read back and stored in pData; on
+ * un-pause, the ABM data stored in pData is set/applied.
+ *
+ *****************************************************************************/
+bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData)
+{
+ union dmub_rb_cmd cmd;
+ uint8_t panel_mask = 0x01 << panel_inst;
+ unsigned int bytes = sizeof(struct abm_save_restore);
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy iramtable into cw7
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)pData, bytes);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_save_restore.header.type = DMUB_CMD__ABM;
+ cmd.abm_save_restore.header.sub_type = DMUB_CMD__ABM_SAVE_RESTORE;
+
+ cmd.abm_save_restore.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_save_restore.abm_init_config_data.bytes = bytes;
+ cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
+
+ cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ // Copy iramtable data into local structure
+ memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
+
+bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+bool dmub_abm_set_backlight_level(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
new file mode 100644
index 000000000000..853564d7f471
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DMUB_ABM_LCD_H__
+#define __DMUB_ABM_LCD_H__
+
+#include "abm.h"
+
+struct abm_save_restore;
+
+void dmub_abm_init(struct abm *abm, uint32_t backlight);
+bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
+unsigned int dmub_abm_get_current_backlight(struct abm *abm);
+unsigned int dmub_abm_get_target_backlight(struct abm *abm);
+void dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes,
+ unsigned int inst);
+
+bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst);
+bool dmub_abm_save_restore(
+ struct dc_context *dc,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
+bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
+bool dmub_abm_set_backlight_level(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int panel_inst);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 3f32e9c3fbaf..2aa0e01a6891 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -47,9 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
if (!lock)
cmd.lock_hw.lock_hw_data.should_release = 1;
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
index fff1d07d865d..d8009b2dc56a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
@@ -48,7 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
sizeof(cmd.outbox1_enable.header);
cmd.outbox1_enable.enable = true;
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 9705d8f88382..0f24b6fbd220 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -87,6 +87,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
state = PSR_STATE4c_FULL_FRAME;
else if (raw_state == 0x4E)
state = PSR_STATE4_FULL_FRAME_POWERUP;
+ else if (raw_state == 0x4F)
+ state = PSR_STATE4_FULL_FRAME_HW_LOCK;
else if (raw_state == 0x60)
state = PSR_STATE_HWLOCK_MGR;
else if (raw_state == 0x61)
@@ -168,9 +170,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -198,9 +198,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
cmd.psr_enable.header.payload_bytes = 0; // Send header only
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Below loops 1000 x 500us = 500 ms.
* Exit PSR may need to wait 1-2 frames to power up. Timeout after at
@@ -248,9 +246,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -269,9 +265,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle;
cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -290,9 +284,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -422,9 +414,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 2;
copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -445,9 +435,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
cmd.psr_enable.header.payload_bytes = 0;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
new file mode 100644
index 000000000000..28149e53c2a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dmub_replay.h"
+
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
+#define MAX_PIPES 6
+
+/*
+ * Get Replay state from firmware.
+ */
+static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *state, uint8_t panel_inst)
+{
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ uint32_t retry_count = 0;
+ enum dmub_status status;
+
+ do {
+ // Send gpint command and wait for ack
+ status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_REPLAY_STATE, panel_inst, 30);
+
+ if (status == DMUB_STATUS_OK) {
+ // GPINT was executed, get response
+ dmub_srv_get_gpint_response(srv, (uint32_t *)state);
+		} else {
+			// Return invalid state when GPINT times out
+			*state = REPLAY_STATE_INVALID;
+		}
+ } while (++retry_count <= 1000 && *state == REPLAY_STATE_INVALID);
+
+ // Assert if max retry hit
+ if (retry_count >= 1000 && *state == REPLAY_STATE_INVALID) {
+ ASSERT(0);
+ /* To-do: Add retry fail log */
+ }
+}
+
+/*
+ * Enable/Disable Replay.
+ */
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ uint32_t retry_count;
+ enum replay_state state = REPLAY_STATE_0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_enable.data.panel_inst = panel_inst;
+
+ cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
+ if (enable)
+ cmd.replay_enable.data.enable = REPLAY_ENABLE;
+ else
+ cmd.replay_enable.data.enable = REPLAY_DISABLE;
+
+ cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait) {
+ for (retry_count = 0; retry_count <= 1000; retry_count++) {
+ dmub_replay_get_state(dmub, &state, panel_inst);
+
+ if (enable) {
+ if (state != REPLAY_STATE_0)
+ break;
+ } else {
+ if (state == REPLAY_STATE_0)
+ break;
+ }
+
+ fsleep(500);
+ }
+
+ /* assert if max retry hit */
+ if (retry_count >= 1000)
+ ASSERT(0);
+ }
+}
+
+/*
+ * Set REPLAY power optimization flags.
+ */
+static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_power_opt.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_power_opt.header.sub_type = DMUB_CMD__SET_REPLAY_POWER_OPT;
+ cmd.replay_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_power_opt_data);
+ cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt;
+ cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst;
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/*
+ * Set up Replay by programming PHY registers and sending replay HW context values to firmware.
+ */
+static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
+ struct dc_link *link,
+ struct replay_context *replay_context,
+ uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+ struct dmub_cmd_replay_copy_settings_data *copy_settings_data
+ = &cmd.replay_copy_settings.replay_copy_settings_data;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ //TODO: refactor for multi edp support
+ break;
+ }
+ }
+
+ if (!pipe_ctx)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_copy_settings.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_copy_settings.header.sub_type = DMUB_CMD__REPLAY_COPY_SETTINGS;
+ cmd.replay_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_replay_copy_settings_data);
+
+ // HW insts
+ copy_settings_data->aux_inst = replay_context->aux_inst;
+ copy_settings_data->digbe_inst = replay_context->digbe_inst;
+ copy_settings_data->digfe_inst = replay_context->digfe_inst;
+
+ if (pipe_ctx->plane_res.dpp)
+ copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
+ else
+ copy_settings_data->dpp_inst = 0;
+ if (pipe_ctx->stream_res.tg)
+ copy_settings_data->otg_inst = pipe_ctx->stream_res.tg->inst;
+ else
+ copy_settings_data->otg_inst = 0;
+
+ copy_settings_data->dpphy_inst = link->link_enc->transmitter;
+
+ // Misc
+ copy_settings_data->line_time_in_ns = replay_context->line_time_in_ns;
+ copy_settings_data->panel_inst = panel_inst;
+ copy_settings_data->debug.u32All = link->replay_settings.config.debug_flags;
+ copy_settings_data->pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
+ copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
+ copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
+ copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+
+ copy_settings_data->flags.u32All = 0;
+ copy_settings_data->flags.bitfields.fec_enable_status = (link->fec_state == dc_link_fec_enabled);
+ copy_settings_data->flags.bitfields.dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
+	// WA for PSRSU+DSC on a specific TCON: if DSC is enabled, force PSRSU into FFU mode (full frame update)
+ if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !link->dc->debug.disable_fec) &&
+ (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->panel_config.dsc.disable_dsc_edp &&
+ link->dc->caps.edp_dsc_support)) &&
+ link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 /*&&
+ (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
+ sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
+ !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
+ sizeof(DP_SINK_DEVICE_STR_ID_2)))*/)
+ copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 1;
+ else
+ copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+/*
+ * Set coasting vtotal.
+ */
+static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
+ uint16_t coasting_vtotal,
+ uint8_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
+ cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
+ cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/*
+ * Get Replay residency from firmware.
+ */
+static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
+ uint32_t *residency, const bool is_start, const bool is_alpm)
+{
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+ uint16_t param = (uint16_t)(panel_inst << 8);
+
+ if (is_alpm)
+ param |= REPLAY_RESIDENCY_MODE_ALPM;
+
+ if (is_start)
+ param |= REPLAY_RESIDENCY_ENABLE;
+
+ // Send gpint command and wait for ack
+ dmub_srv_send_gpint_command(srv, DMUB_GPINT__REPLAY_RESIDENCY, param, 30);
+
+ if (!is_start)
+ dmub_srv_get_gpint_response(srv, residency);
+ else
+ *residency = 0;
+}
+
+static const struct dmub_replay_funcs replay_funcs = {
+ .replay_copy_settings = dmub_replay_copy_settings,
+ .replay_enable = dmub_replay_enable,
+ .replay_get_state = dmub_replay_get_state,
+ .replay_set_power_opt = dmub_replay_set_power_opt,
+ .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
+ .replay_residency = dmub_replay_residency,
+};
+
+/*
+ * Construct Replay object.
+ */
+static void dmub_replay_construct(struct dmub_replay *replay, struct dc_context *ctx)
+{
+ replay->ctx = ctx;
+ replay->funcs = &replay_funcs;
+}
+
+/*
+ * Allocate and initialize Replay object.
+ */
+struct dmub_replay *dmub_replay_create(struct dc_context *ctx)
+{
+ struct dmub_replay *replay = kzalloc(sizeof(struct dmub_replay), GFP_KERNEL);
+
+ if (replay == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_replay_construct(replay, ctx);
+
+ return replay;
+}
+
+/*
+ * Deallocate Replay object.
+ */
+void dmub_replay_destroy(struct dmub_replay **dmub)
+{
+ kfree(*dmub);
+ *dmub = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
new file mode 100644
index 000000000000..e8385bbf51fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_REPLAY_H_
+#define _DMUB_REPLAY_H_
+
+#include "dc_types.h"
+#include "dmub_cmd.h"
+struct dc_link;
+struct dmub_replay_funcs;
+
+struct dmub_replay {
+ struct dc_context *ctx;
+ const struct dmub_replay_funcs *funcs;
+};
+
+struct dmub_replay_funcs {
+ void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
+ uint8_t panel_inst);
+ void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
+ uint8_t panel_inst);
+ bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
+ struct replay_context *replay_context, uint8_t panel_inst);
+ void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
+ uint8_t panel_inst);
+ void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal,
+ uint8_t panel_inst);
+ void (*replay_residency)(struct dmub_replay *dmub,
+ uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+};
+
+struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
+void dmub_replay_destroy(struct dmub_replay **dmub);
+
+#endif /* _DMUB_REPLAY_H_ */
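
Taken together, the two new files above expose a small vtable-style Replay interface. The following caller-side sketch only illustrates the call sequence using the functions and fields declared in this patch; it is not part of the patch itself. The instance numbers and line time are placeholders, and in the driver the dmub_replay object is owned by the resource pool rather than created inline.

/* Hypothetical helper, shown for illustration only; it assumes an already
 * initialized eDP link and uses only the interface from dmub_replay.h. */
static bool example_enable_replay(struct dc_context *ctx, struct dc_link *edp_link,
				  uint8_t panel_inst)
{
	struct replay_context replay_ctx = {0};
	struct dmub_replay *replay = dmub_replay_create(ctx);
	uint32_t residency = 0;

	if (!replay)
		return false;

	/* Placeholder HW instances; normally taken from the link/stream
	 * encoders assigned to this eDP panel. */
	replay_ctx.aux_inst = 0;
	replay_ctx.digbe_inst = 0;
	replay_ctx.digfe_inst = 0;
	replay_ctx.line_time_in_ns = 7407;

	if (!replay->funcs->replay_copy_settings(replay, edp_link, &replay_ctx,
						 panel_inst)) {
		dmub_replay_destroy(&replay);
		return false;
	}

	/* Enable and wait for the firmware to leave REPLAY_STATE_0. */
	replay->funcs->replay_enable(replay, true, true, panel_inst);

	/* Open a residency measurement window; a later call with
	 * is_start == false reads the result back into 'residency'. */
	replay->funcs->replay_residency(replay, panel_inst, &residency,
					true /* is_start */, false /* is_alpm */);

	/* Destroyed inline only to keep the example self-contained; in the
	 * driver the object lives for the lifetime of the resource pool. */
	dmub_replay_destroy(&replay);
	return true;
}
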
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 54805802cbd5..899b25b0bad8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -401,6 +401,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
+static const struct dc_debug_options debug_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -820,7 +824,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1071,6 +1075,7 @@ static bool dce100_resource_construct(
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
dc->caps.extended_aux_timeout_support = false;
+ dc->debug = debug_defaults;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 8d2460d06bce..ad967b58d7be 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -209,9 +209,6 @@ static bool dce110_enable_display_power_gating(
struct dc_context *ctx = dc->ctx;
unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- return true;
-
if (power_gating == PIPE_GATING_CONTROL_INIT)
cntl = ASIC_PIPE_INIT;
else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
@@ -222,7 +219,7 @@ static bool dce110_enable_display_power_gating(
if (controller_id == underlay_idx)
controller_id = CONTROLLER_ID_UNDERLAY0 - 1;
- if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
bp_result = dcb->funcs->enable_disp_power_gating(
dcb, controller_id + 1, cntl);
@@ -780,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready(
dal_gpio_destroy_irq(&hpd);
/* ensure that the panel is detected */
- ASSERT(edp_hpd_high);
+ if (!edp_hpd_high)
+ DC_LOG_DC("%s: wait timed out!\n", __func__);
}
void dce110_edp_power_control(
@@ -1153,6 +1151,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct dtbclk_dto_params dto_params = {0};
int dp_hpo_inst;
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1179,7 +1179,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- }
+ } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->disable_symclk_se)
+ dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
+ link_enc->transmitter - TRANSMITTER_UNIPHY_A);
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
@@ -1219,7 +1221,8 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dce_hwseq *hws = link->dc->hwseq;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- hws->funcs.edp_backlight_control(link, false);
+ if (!stream->skip_edp_power_down)
+ hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
@@ -1587,6 +1590,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
*/
if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
+ pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false;
}
return DC_OK;
}
@@ -1794,10 +1798,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
hws->funcs.edp_backlight_control(edp_link_with_sink, false);
}
/*resume from S3, no vbios posting, no need to power down again*/
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false);
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
}
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
}
@@ -2015,6 +2022,10 @@ static bool should_enable_fbc(struct dc *dc,
if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
return false;
+ /* Replay should not be enabled */
+ if (pipe_ctx->stream->link->replay_settings.replay_feature_enabled)
+ return false;
+
/* Nothing to compress */
if (!pipe_ctx->plane_state)
return false;
@@ -2291,6 +2302,11 @@ enum dc_status dce110_apply_ctx_to_hw(
if (DC_OK != status)
return status;
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (hws->funcs.resync_fifo_dccg_dio)
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
}
if (dc->fbc_compressor)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index a4a45a6ce61e..1289b9418877 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -424,6 +424,10 @@ static const struct dc_plane_cap plane_cap = {
64
};
+static const struct dc_debug_options debug_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_plane_cap underlay_plane_cap = {
.type = DC_PLANE_TYPE_DCE_UNDERLAY,
.per_pixel_alpha = 1,
@@ -938,7 +942,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1115,13 +1119,15 @@ static enum dc_status dce110_add_stream_to_ctx(
}
static struct pipe_ctx *dce110_acquire_underlay(
- struct dc_state *context,
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head_pipe)
{
+ struct dc_stream_state *stream = opp_head_pipe->stream;
struct dc *dc = stream->ctx->dc;
struct dce_hwseq *hws = dc->hwseq;
- struct resource_context *res_ctx = &context->res_ctx;
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
unsigned int underlay_idx = pool->underlay_pipe_index;
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
@@ -1169,7 +1175,7 @@ static struct pipe_ctx *dce110_acquire_underlay(
stream->timing.h_total,
stream->timing.v_total,
stream->timing.pix_clk_100hz / 10,
- context->stream_count);
+ new_ctx->stream_count);
color_space_to_black_color(dc,
COLOR_SPACE_YCBCR601, &black_color);
@@ -1229,7 +1235,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
.panel_cntl_create = dce110_panel_cntl_create,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
- .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay,
.add_stream_to_ctx = dce110_add_stream_to_ctx,
.validate_global = dce110_validate_global,
.find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
@@ -1368,6 +1374,7 @@ static bool dce110_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.is_apu = true;
dc->caps.extended_aux_timeout_support = false;
+ dc->debug = debug_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index 27cbb5b42c7e..6424e7f279dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -288,7 +288,7 @@ bool dce110_timing_generator_program_timing_generator(
uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
dc_crtc_timing->v_front_porch;
- uint32_t v_sync_start =dc_crtc_timing->v_addressable + vsync_offset;
+ uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset;
uint32_t hsync_offset = dc_crtc_timing->h_border_right +
dc_crtc_timing->h_front_porch;
@@ -603,7 +603,7 @@ void dce110_timing_generator_program_blanking(
{
uint32_t vsync_offset = timing->v_border_bottom +
timing->v_front_porch;
- uint32_t v_sync_start =timing->v_addressable + vsync_offset;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
uint32_t hsync_offset = timing->h_border_right +
timing->h_front_porch;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
index 19873ee1f78d..0ef9ebb3c1e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -120,9 +120,6 @@ static bool dce112_enable_display_power_gating(
enum bp_pipe_control_action cntl;
struct dc_context *ctx = dc->ctx;
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- return true;
-
if (power_gating == PIPE_GATING_CONTROL_INIT)
cntl = ASIC_PIPE_INIT;
else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
@@ -130,7 +127,7 @@ static bool dce112_enable_display_power_gating(
else
cntl = ASIC_PIPE_DISABLE;
- if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
bp_result = dcb->funcs->enable_disp_power_gating(
dcb, controller_id + 1, cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index e179e80667d1..2b20180f1a32 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -429,6 +429,10 @@ static const struct dc_plane_cap plane_cap = {
64
};
+static const struct dc_debug_options debug_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -869,7 +873,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -960,7 +964,7 @@ enum dc_status resource_map_phy_clock_resources(
{
/* acquire new resources */
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
if (!pipe_ctx)
@@ -970,10 +974,12 @@ enum dc_status resource_map_phy_clock_resources(
|| dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
- else
- pipe_ctx->clock_source = find_matching_pll(
- &context->res_ctx, dc->res_pool,
- stream);
+ else {
+ if (stream && stream->link && stream->link->link_enc)
+ pipe_ctx->clock_source = find_matching_pll(
+ &context->res_ctx, dc->res_pool,
+ stream);
+ }
if (pipe_ctx->clock_source == NULL)
return DC_NO_CLOCK_SOURCE_RESOURCE;
@@ -1239,6 +1245,7 @@ static bool dce112_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
+ dc->debug = debug_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index d4afe6c824d2..45e08c4d5861 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -159,9 +159,6 @@ static bool dce120_enable_display_power_gating(
enum bp_pipe_control_action cntl;
struct dc_context *ctx = dc->ctx;
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- return true;
-
if (power_gating == PIPE_GATING_CONTROL_INIT)
cntl = ASIC_PIPE_INIT;
else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index af631085e88c..18c5a86d2d61 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -526,6 +526,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
+ .enable_legacy_fast_update = true,
};
static struct clock_source *dce120_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 5825e6f412bd..061221394ce0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -58,13 +58,13 @@
#include "dce/dce_i2c.h"
/* TODO remove this include */
-#include "dce80_resource.h"
-
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#endif
+#include "dce80/dce80_resource.h"
+
#ifndef mmDP_DPHY_INTERNAL_CTRL
#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
@@ -418,6 +418,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
+static const struct dc_debug_options debug_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE80_REG_LIST()
};
@@ -969,6 +973,7 @@ static bool dce80_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
+ dc->debug = debug_defaults;
/*************************************************
* Create resources *
@@ -1369,6 +1374,7 @@ static bool dce83_construct(
dc->caps.max_cursor_size = 128;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.is_apu = true;
+ dc->debug = debug_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 7a00fe525dfb..3538973bd0c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float(
#define NUMBER_REGIONS 32
#define NUMBER_SW_SEGMENTS 16
-bool cm_helper_translate_curve_to_hw_format(
+#define DC_LOGGER \
+ ctx->logger
+
+bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
@@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format(
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
if (fixpoint == true) {
- rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
- rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
- rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
+ uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
+ uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
+ uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
+
+ if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
+ DC_LOG_WARNING("Losing delta precision while programming shaper LUT.");
+
+ rgb->delta_red_reg = red_clamp & 0x3ff;
+ rgb->delta_green_reg = green_clamp & 0x3ff;
+ rgb->delta_blue_reg = blue_clamp & 0x3ff;
rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
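
The hunk above widens the delta clamp from 10 to 14 fractional bits and warns before truncating to the 10-bit shaper LUT register field. Below is a minimal, self-contained sketch of that check; plain integers stand in for the driver's fixed31_32 type, and reading u0d14/u0d10 as "unsigned, 14/10 fractional bits" is an assumption based on the helper names.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for dc_fixpt_clamp_u0d14(): clamp a raw fractional value to
 * 14 bits. The real helper operates on struct fixed31_32. */
static uint32_t clamp_u0d14(uint32_t raw)
{
	return raw > 0x3fff ? 0x3fff : raw;
}

int main(void)
{
	/* Example deltas in raw u0d14 units (assumed 14 fractional bits). */
	uint32_t deltas[] = { 0x005a, 0x03ff, 0x1234 };
	size_t i;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		uint32_t clamped = clamp_u0d14(deltas[i]);

		/* Any of bits 13:10 set means the 10-bit register field
		 * cannot hold the full delta; this mirrors the
		 * DC_LOG_WARNING() added in the hunk above, which then
		 * programs only the low 10 bits. */
		if (clamped >> 10)
			printf("0x%04x: precision lost, programming 0x%03x\n",
			       (unsigned)clamped, (unsigned)(clamped & 0x3ff));
		else
			printf("0x%04x: fits in 10 bits\n", (unsigned)clamped);
	}
	return 0;
}
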
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 3b8cd7410498..0a68b63d6126 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float(
bool fixpoint);
bool cm_helper_translate_curve_to_hw_format(
+ struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index b33955928bd0..5ca9ab8a76e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -39,9 +39,6 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
-#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
-#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
-#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
#define REG(reg)\
dpp->tf_regs->reg
@@ -200,8 +197,7 @@ static void dpp1_dscl_set_lb(
DITHER_EN, 0, /* Dithering enable: Disabled */
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
- }
- else {
+ } else {
/* DSCL caps: pixel data processed in float format */
REG_SET_2(LB_DATA_FORMAT, 0,
INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
@@ -591,18 +587,6 @@ static void dpp1_dscl_set_manual_ratio_init(
static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
- int visual_confirm_on = 0;
- unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
-
- if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
- visual_confirm_on = 1;
-
- /* Check bounds to ensure the VC bar height was set to a sane value */
- if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
- (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
- visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
- }
-
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
@@ -613,8 +597,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
/* Number of RECOUT horizontal pixels */
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
- RECOUT_HEIGHT, recout->height
- - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
+ RECOUT_HEIGHT, recout->height);
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 0b17c2993ca5..09784222cc03 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -690,6 +690,8 @@ struct dcn_hubp_state {
uint32_t primary_surface_addr_hi;
uint32_t primary_meta_addr_lo;
uint32_t primary_meta_addr_hi;
+ uint32_t uclk_pstate_force;
+ uint32_t hubp_cntl;
};
struct dcn10_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1c3b6f25a782..9834b75f1837 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1012,31 +1012,29 @@ static void dcn10_reset_back_end_for_pipe(
return;
}
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- link = pipe_ctx->stream->link;
- /* DPMS may already disable or */
- /* dpms_off status is incorrect due to fastboot
- * feature. When system resume from S4 with second
- * screen only, the dpms_off would be true but
- * VBIOS lit up eDP, so check link status too.
- */
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- dc->link_srv->set_dpms_off(pipe_ctx);
- else if (pipe_ctx->stream_res.audio)
- dc->hwss.disable_audio_stream(pipe_ctx);
-
- if (pipe_ctx->stream_res.audio) {
- /*disable az_endpoint*/
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
- /*free audio*/
- if (dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
- pipe_ctx->stream_res.audio, false);
- pipe_ctx->stream_res.audio = NULL;
- }
+ link = pipe_ctx->stream->link;
+ /* DPMS may already disable or */
+ /* dpms_off status is incorrect due to fastboot
+ * feature. When system resume from S4 with second
+ * screen only, the dpms_off would be true but
+ * VBIOS lit up eDP, so check link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
}
}
@@ -1499,54 +1497,32 @@ void dcn10_init_hw(struct dc *dc)
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-
- REG_WRITE(REFCLK_CNTL, 0);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
-
- //Enable ability to power gate / don't force power on permanently
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(hws, true);
-
- return;
- }
-
if (!dcb->funcs->is_accelerated_mode(dcb))
hws->funcs.disable_vga(dc->hwseq);
- hws->funcs.bios_golden_init(dc);
+ if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
+ hws->funcs.bios_golden_init(dc);
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->dccg && res_pool->hubbub) {
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- // Not all ASICs have DCCG sw component
- res_pool->ref_clocks.dccg_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- }
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
@@ -1867,7 +1843,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
- else if (cm_helper_translate_curve_to_hw_format(
+ else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
@@ -1923,6 +1899,11 @@ void dcn10_pipe_control_lock(
*
* TODO: Optimize cursor programming to be once per frame before VUPDATE
* to avoid the need for this workaround.
+ *
+ * @dc: Current DC state
+ * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
+ *
+ * Return: void
*/
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
@@ -2600,23 +2581,15 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
-void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
+void dcn10_update_visual_confirm_color(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id)
{
struct mpc *mpc = dc->res_pool->mpc;
- if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
- get_hdr_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
- get_surface_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
- get_surface_tile_visual_confirm_color(pipe_ctx, color);
- else
- color_space_to_black_color(
- dc, pipe_ctx->stream->output_color_space, color);
-
if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
- mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
+ mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
@@ -2669,7 +2642,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
/* If there is no full update, don't need to touch MPC tree*/
if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
return;
}
@@ -2691,7 +2664,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
NULL,
hubp->inst,
mpcc_id);
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
ASSERT(new_mpcc != NULL);
hubp->opp_id = pipe_ctx->stream_res.opp->inst;
@@ -3076,15 +3049,13 @@ void dcn10_prepare_bandwidth(
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (context->stream_count == 0)
- context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
+ if (context->stream_count == 0)
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
- dc->clk_mgr->funcs->update_clocks(
- dc->clk_mgr,
- context,
- false);
- }
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
@@ -3116,15 +3087,13 @@ void dcn10_optimize_bandwidth(
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (context->stream_count == 0)
- context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
+ if (context->stream_count == 0)
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
- dc->clk_mgr->funcs->update_clocks(
- dc->clk_mgr,
- context,
- true);
- }
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
@@ -3309,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
- if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 0ef7bf7ddb75..ef6d56da417c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -202,7 +202,6 @@ void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);
void dcn10_update_visual_confirm_color(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
- struct tg_color *color,
int mpcc_id);
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index a0f8e31d2adc..46a2ebcabd1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -45,7 +45,8 @@
#include "dcn10_cm_common.h"
#include "clk_mgr.h"
-unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
+__printf(3, 4)
+unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...)
{
int ret_vsnprintf;
unsigned int chars_printed;
@@ -53,15 +54,15 @@ unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
va_list args;
va_start(args, fmt);
- ret_vsnprintf = vsnprintf(pBuf, bufSize, fmt, args);
+ ret_vsnprintf = vsnprintf(pbuf, bufsize, fmt, args);
va_end(args);
if (ret_vsnprintf > 0) {
- if (ret_vsnprintf < bufSize)
+ if (ret_vsnprintf < bufsize)
chars_printed = ret_vsnprintf;
else
- chars_printed = bufSize - 1;
+ chars_printed = bufsize - 1;
} else
chars_printed = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ee08b545aaea..377f1ba1a81b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1056,7 +1056,7 @@ void dcn10_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
- if (!dcn10_is_dig_enabled(enc)) {
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
/*in DP_Alt_No_Connect case, we turn off the dig already,
after excuation the PHY w/a sequence, not allow touch PHY any more*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 8e9384094f6d..f2f55565e98a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -212,8 +212,9 @@ struct mpcc *mpc1_insert_plane(
/* check insert_above_mpcc exist in tree->opp_list */
struct mpcc *temp_mpcc = tree->opp_list;
- while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
- temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc != insert_above_mpcc)
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
if (temp_mpcc == NULL)
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 41cec7acf51f..0dec57679269 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -37,14 +37,14 @@
#define CTX \
oppn10->base.ctx
-
-/************* FORMATTER ************/
-
/**
- * set_truncation
+ * opp1_set_truncation():
* 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
* 2) enable truncation
* 3) HW remove 12bit FMT support for DCE11 power saving reason.
+ *
+ * @oppn10: output_pixel_processor struct instance for dcn10.
+ * @params: pointer to bit_depth_reduction_params.
*/
static void opp1_set_truncation(
struct dcn10_opp *oppn10,
@@ -149,11 +149,12 @@ void opp1_program_bit_depth_reduction(
}
/**
- * set_pixel_encoding
- *
- * Set Pixel Encoding
+ * opp1_set_pixel_encoding():
* 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
* 1: YCbCr 4:2:2
+ *
+ * @oppn10: output_pixel_processor struct instance for dcn10.
+ * @params: pointer to clamping_and_pixel_encoding_params.
*/
static void opp1_set_pixel_encoding(
struct dcn10_opp *oppn10,
@@ -180,13 +181,16 @@ static void opp1_set_pixel_encoding(
}
/**
- * Set Clamping
+ * opp1_set_clamping():
* 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
* 1 for 8 bpc
* 2 for 10 bpc
* 3 for 12 bpc
* 7 for programable
* 2) Enable clamp if Limited range requested
+ *
+ * @oppn10: output_pixel_processor struct instance for dcn10.
+ * @params: pointer to clamping_and_pixel_encoding_params.
*/
static void opp1_set_clamping(
struct dcn10_opp *oppn10,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index c9e53dc49c92..0e8f4f36c87c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -42,11 +42,13 @@
#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
/**
-* apply_front_porch_workaround TODO FPGA still need?
-*
-* This is a workaround for a bug that has existed since R5xx and has not been
-* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
-*/
+ * apply_front_porch_workaround() - This is a workaround for a bug that has
+ *                                  existed since R5xx and has not been fixed:
+ *                                  keep front porch at minimum 2 for interlaced
+ *                                  mode or 1 for progressive.
+ *
+ * @timing: Timing parameters used to configure DCN blocks.
+ */
static void apply_front_porch_workaround(struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
@@ -133,9 +135,20 @@ void optc1_setup_vertical_interrupt2(
}
/**
- * program_timing_generator used by mode timing set
- * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
- * Including SYNC. Call BIOS command table to program Timings.
+ * optc1_program_timing() - used by mode timing set to program
+ *                          CRTC timing registers - OTG_H_*,
+ *                          OTG_V_*, pixel repetition,
+ *                          including SYNC. Call BIOS command table to program timings.
+ *
+ * @optc: timing_generator instance.
+ * @dc_crtc_timing: Timing parameters used to configure DCN blocks.
+ * @vready_offset: Vready's starting position.
+ * @vstartup_start: Vstartup period.
+ * @vupdate_offset: Vupdate starting position.
+ * @vupdate_width: Vupdate duration.
+ * @signal: DC signal types.
+ * @use_vbios: whether to program timings from the BIOS command table.
+ *
*/
void optc1_program_timing(
struct timing_generator *optc,
@@ -385,6 +398,9 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
* Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
* VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
*
+ * @optc: timing_generator instance.
+ * @enable: Enable DRR double buffering control if true, disable otherwise.
+ *
* Options: any time, start of frame, dp start of frame (range timing)
*/
void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
@@ -397,8 +413,9 @@ void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
}
/**
- * unblank_crtc
- * Call ASIC Control Object to UnBlank CRTC.
+ * optc1_unblank_crtc() - Call ASIC Control Object to UnBlank CRTC.
+ *
+ * @optc: timing_generator instance.
*/
static void optc1_unblank_crtc(struct timing_generator *optc)
{
@@ -419,8 +436,9 @@ static void optc1_unblank_crtc(struct timing_generator *optc)
}
/**
- * blank_crtc
- * Call ASIC Control Object to Blank CRTC.
+ * optc1_blank_crtc() - Call ASIC Control Object to Blank CRTC.
+ *
+ * @optc: timing_generator instance.
*/
static void optc1_blank_crtc(struct timing_generator *optc)
@@ -493,8 +511,9 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
}
/**
- * Enable CRTC
- * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ * optc1_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
+ *
+ * @optc: timing_generator instance.
*/
static bool optc1_enable_crtc(struct timing_generator *optc)
{
@@ -653,11 +672,9 @@ void optc1_lock(struct timing_generator *optc)
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
- /* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);
}
@@ -892,15 +909,11 @@ static void optc1_program_manual_trigger(struct timing_generator *optc)
MANUAL_FLOW_CONTROL, 0);
}
-
/**
- *****************************************************************************
- * Function: set_drr
+ * optc1_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
*
- * @brief
- * Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
- *
- *****************************************************************************
+ * @optc: timing_generator instance.
+ * @params: parameters used for Dynamic Refresh Rate.
*/
void optc1_set_drr(
struct timing_generator *optc,
@@ -932,19 +945,10 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
-
- } else {
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_SET_V_TOTAL_MIN_MASK, 0,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_FORCE_LOCK_ON_EVENT, 0);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
+
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
}
void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 21ec1ba5ed75..9f9145742f14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -553,6 +553,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_legacy_fast_update = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -886,13 +887,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn10_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn10_hwseq_create,
-};
-
static void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@@ -1061,7 +1055,7 @@ static enum dc_status build_mapped_resource(
struct dc_state *context,
struct dc_stream_state *stream)
{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1089,14 +1083,15 @@ static enum dc_status dcn10_add_stream_to_ctx(
return result;
}
-static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
- struct dc_state *context,
+static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head_pipe)
{
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
+ struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
@@ -1277,7 +1272,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.link_enc_create = dcn10_link_encoder_create,
.panel_cntl_create = dcn10_panel_cntl_create,
.validate_bandwidth = dcn10_validate_bandwidth,
- .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
@@ -1651,9 +1646,8 @@ static bool dcn10_resource_construct(
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto fail;
+ &res_create_funcs))
+ goto fail;
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 7bdc146f7cb5..c8602bcfa393 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -208,7 +208,9 @@
#define DCCG314_REG_FIELD_LIST(type) \
type DSCCLK3_DTO_PHASE;\
type DSCCLK3_DTO_MODULO;\
- type DSCCLK3_DTO_ENABLE;
+ type DSCCLK3_DTO_ENABLE;\
+ type DENTIST_DISPCLK_RDIVIDER;\
+ type DENTIST_DISPCLK_WDIVIDER;
#define DCCG32_REG_FIELD_LIST(type) \
type DPSTREAMCLK0_EN;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 5bd698cd6d20..5eebe7f03ddc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -30,22 +30,13 @@
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
-static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);
-static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
- struct dsc_optc_config *dsc_optc_cfg);
-static void dsc_init_reg_values(struct dsc_reg_values *reg_vals);
-static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params);
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
-static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple);
-static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth);
/* Object I/F functions */
-static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
-static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
static void dsc2_disable(struct display_stream_compressor *dsc);
static void dsc2_disconnect(struct display_stream_compressor *dsc);
@@ -108,7 +99,7 @@ void dsc2_construct(struct dcn20_dsc *dsc,
/* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput
* can be doubled, tripled etc. by using additional DSC engines.
*/
-static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
{
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
@@ -184,7 +175,7 @@ static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const st
}
-static void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
+void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config)
{
DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h);
DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v);
@@ -211,7 +202,7 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct
}
-static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
+bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps)
{
bool is_config_ok;
struct dsc_reg_values dsc_reg_vals;
@@ -291,7 +282,7 @@ static void dsc2_disconnect(struct display_stream_compressor *dsc)
}
/* This module's internal functions */
-static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
+void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps)
{
int i;
int bits_per_pixel = pps->bits_per_pixel;
@@ -345,7 +336,7 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
}
}
-static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
+void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
{
uint8_t i;
@@ -372,7 +363,7 @@ static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_
rc->flatness_det_thresh = override->flatness_det_thresh;
}
-static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
+bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
@@ -463,7 +454,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
}
-static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple)
+enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple)
{
enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN;
@@ -495,7 +486,7 @@ static enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_p
}
-static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth)
+enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth)
{
enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN;
@@ -518,7 +509,7 @@ static enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_co
}
-static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
+void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
{
int i;
@@ -574,7 +565,7 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals)
* This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn
* affects non-PPS register values.
*/
-static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params)
+void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 7ce64a3c1b02..ba869387c3c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -549,6 +549,27 @@ struct dcn20_dsc {
int max_image_width;
};
+void dsc_config_log(struct display_stream_compressor *dsc,
+ const struct dsc_config *config);
+
+void dsc_log_pps(struct display_stream_compressor *dsc,
+ struct drm_dsc_config *pps);
+
+void dsc_override_rc_params(struct rc_params *rc,
+ const struct dc_dsc_rc_params_override *override);
+
+bool dsc_prepare_config(const struct dsc_config *dsc_cfg,
+ struct dsc_reg_values *dsc_reg_vals,
+ struct dsc_optc_config *dsc_optc_cfg);
+
+enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc,
+ bool is_ycbcr422_simple);
+
+enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth);
+
+void dsc_init_reg_values(struct dsc_reg_values *reg_vals);
+
+void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params);
void dsc2_construct(struct dcn20_dsc *dsc,
struct dc_context *ctx,
@@ -557,5 +578,12 @@ void dsc2_construct(struct dcn20_dsc *dsc,
const struct dcn20_dsc_shift *dsc_shift,
const struct dcn20_dsc_mask *dsc_mask);
+void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps,
+ int pixel_clock_100Hz);
+
+bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
+ const struct dsc_config *dsc_cfg,
+ uint8_t *dsc_packed_pps);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 24bd93219936..6eebcb22e317 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -623,6 +623,17 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s
REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid);
REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe);
}
+
+ if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) {
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6);
+ hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+ }
+
+ if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL))
+ hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL);
+
+ if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL))
+ hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL);
}
static const struct hubbub_funcs hubbub2_funcs = {
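For reference, a minimal standalone C sketch (not DC code) of the guarded debug-register reads this hunk adds to hubbub2_read_state(): a register is only written/read when its offset exists for the ASIC, mirroring the REG() checks. reg_write()/reg_read() and the offsets are illustrative stand-ins for the real accessors.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[16];

static void reg_write(uint32_t offset, uint32_t val) { fake_mmio[offset] = val; }
static uint32_t reg_read(uint32_t offset) { return fake_mmio[offset]; }

int main(void)
{
        /* Nonzero offset == register defined for this ASIC (like REG(x)). */
        uint32_t debug_index = 2, debug_data = 3, dram_state_cntl = 0;
        uint32_t test_debug_data = 0, dram_state = 0;

        if (debug_index && debug_data) {
                reg_write(debug_index, 0x6);    /* select debug bus output 6 */
                test_debug_data = reg_read(debug_data);
        }

        if (dram_state_cntl)                    /* absent on this ASIC: skipped */
                dram_state = reg_read(dram_state_cntl);

        printf("debug=0x%x dram_state=0x%x\n", test_debug_data, dram_state);
        return 0;
}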
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 422fbf79da64..65fa9e21ad9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -313,6 +313,10 @@ void dcn20_init_blank(
}
opp = dc->res_pool->opps[opp_id_src0];
+ /* don't override the blank pattern if already enabled with the correct one. */
+ if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp))
+ return;
+
if (num_opps == 2) {
otg_active_width = otg_active_width / 2;
@@ -863,7 +867,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
params = &stream->out_transfer_func->pwl;
else if (pipe_ctx->stream->out_transfer_func->type ==
TF_TYPE_DISTRIBUTED_POINTS &&
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
@@ -892,7 +896,7 @@ bool dcn20_set_blend_lut(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
blend_lut = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
&dpp_base->regamma_params, false);
blend_lut = &dpp_base->regamma_params;
@@ -914,7 +918,7 @@ bool dcn20_set_shaper_3dlut(
if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
shaper_lut = &plane_state->in_shaper_func->pwl;
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
@@ -1050,9 +1054,9 @@ void dcn20_blank_pixel_data(
enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
struct pipe_ctx *odm_pipe;
int odm_cnt = 1;
-
- int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
- int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ int v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
if (stream->link->test_pattern_enabled)
return;
@@ -1062,8 +1066,8 @@ void dcn20_blank_pixel_data(
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_cnt++;
-
- width = width / odm_cnt;
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
if (blank) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
@@ -1076,29 +1080,32 @@ void dcn20_blank_pixel_data(
test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
}
- dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- test_pattern,
- test_pattern_color_space,
- stream->timing.display_color_depth,
- &black_color,
- width,
- height,
- 0);
+ odm_pipe = pipe_ctx;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ while (odm_pipe->next_odm_pipe) {
dc->hwss.set_disp_pattern_generator(dc,
odm_pipe,
- dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
+ test_pattern,
test_pattern_color_space,
stream->timing.display_color_depth,
&black_color,
- width,
- height,
- 0);
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ dc->hwss.set_disp_pattern_generator(dc,
+ odm_pipe,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ &black_color,
+ last_odm_slice_width,
+ v_active,
+ offset);
+
if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
/* when exiting dynamic ODM need to reinit DPG state for unused pipes */
struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
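A toy model of the per-ODM-slice blanking loop introduced above: each slice gets h_active / odm_cnt pixels, the last slice absorbs the remainder, and each programming call receives a running horizontal offset. In the driver the per-slice call is dc->hwss.set_disp_pattern_generator() on each odm_pipe; everything below is an illustrative stand-in.

#include <stdio.h>

int main(void)
{
        int h_active = 3840, v_active = 2160, odm_cnt = 3;
        int slice_w = h_active / odm_cnt;
        int last_w = h_active - slice_w * (odm_cnt - 1);
        int offset = 0, i;

        for (i = 0; i < odm_cnt; i++) {
                int w = (i == odm_cnt - 1) ? last_w : slice_w;

                printf("slice %d: width=%d height=%d offset=%d\n",
                       i, w, v_active, offset);
                offset += w;
        }
        return 0;
}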
@@ -1262,20 +1269,21 @@ void dcn20_pipe_control_lock(
}
if (flip_immediate && lock) {
- const int TIMEOUT_FOR_FLIP_PENDING = 100000;
+ const int TIMEOUT_FOR_FLIP_PENDING_US = 100000;
+ unsigned int polling_interval_us = 1;
int i;
temp_pipe = pipe;
while (temp_pipe) {
if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
- for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
+ for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING_US / polling_interval_us; ++i) {
if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
break;
- udelay(1);
+ udelay(polling_interval_us);
}
/* no reason it should take this long for immediate flips */
- ASSERT(i != TIMEOUT_FOR_FLIP_PENDING);
+ ASSERT(i != TIMEOUT_FOR_FLIP_PENDING_US);
}
temp_pipe = temp_pipe->bottom_pipe;
}
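A hedged sketch of the renamed timeout pattern in dcn20_pipe_control_lock(): the budget is now expressed in microseconds and divided by the polling interval to get the iteration count. The is_pending callback and usleep() are stand-ins for hubp_is_flip_pending() and udelay(); this is not DC code.

#include <stdbool.h>
#include <unistd.h>

typedef bool (*pending_fn)(void *ctx);

bool wait_for_flip_done(pending_fn is_pending, void *ctx,
                        unsigned int timeout_us, unsigned int interval_us)
{
        unsigned int i;

        for (i = 0; i < timeout_us / interval_us; i++) {
                if (!is_pending(ctx))
                        return true;            /* flip completed in time */
                usleep(interval_us);            /* udelay() in kernel context */
        }
        return false;                           /* budget exhausted */
}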
@@ -1357,6 +1365,7 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
new_pipe->update_flags.bits.dppclk = 1;
new_pipe->update_flags.bits.hubp_interdependent = 1;
new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.unbounded_req = 1;
new_pipe->update_flags.bits.gamut_remap = 1;
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
@@ -1500,6 +1509,9 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
}
+
+ if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+ new_pipe->update_flags.bits.unbounded_req = 1;
}
static void dcn20_update_dchubp_dpp(
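A reduced model of the unbounded_req handling added in these hunks: an update flag is raised only when the value differs between the old and new pipe state, and the hardware hook runs only when that flag is set. The struct layout and program_unbounded_req() are illustrative stand-ins, not driver types.

#include <stdbool.h>
#include <stdio.h>

struct pipe_state {
        bool unbounded_req;
        struct { bool unbounded_req; } update_flags;
};

static void program_unbounded_req(bool enable)
{
        printf("programming unbounded requesting: %d\n", enable);
}

static void detect_and_program(const struct pipe_state *old_pipe,
                               struct pipe_state *new_pipe)
{
        if (old_pipe->unbounded_req != new_pipe->unbounded_req)
                new_pipe->update_flags.unbounded_req = true;

        if (new_pipe->update_flags.unbounded_req)
                program_unbounded_req(new_pipe->unbounded_req);
}

int main(void)
{
        struct pipe_state old_p = { .unbounded_req = false };
        struct pipe_state new_p = { .unbounded_req = true };

        detect_and_program(&old_p, &new_p);
        return 0;
}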
@@ -1533,10 +1545,11 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
-
- if (hubp->funcs->set_unbounded_requesting)
- hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
}
+
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
+
if (pipe_ctx->update_flags.bits.hubp_interdependent)
hubp->funcs->hubp_setup_interdependent(
hubp,
@@ -1625,6 +1638,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
+ || plane_state->update_flags.bits.gamut_remap_change
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
@@ -1755,8 +1769,9 @@ static void dcn20_program_pipe(
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d)
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1939,7 +1954,8 @@ void dcn20_post_unlock_program_front_end(
struct dc_state *context)
{
int i;
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1961,10 +1977,9 @@ void dcn20_post_unlock_program_front_end(
pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
-
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
- udelay(1);
+ udelay(polling_interval_us);
}
}
@@ -2126,7 +2141,7 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
- if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2134,7 +2149,7 @@ void dcn20_optimize_bandwidth(
&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
- pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+ pipe_ctx->dlg_regs.min_dst_y_next_start);
}
}
}
@@ -2471,36 +2486,31 @@ static void dcn20_reset_back_end_for_pipe(
return;
}
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* DPMS may already disable or */
- /* dpms_off status is incorrect due to fastboot
- * feature. When system resume from S4 with second
- * screen only, the dpms_off would be true but
- * VBIOS lit up eDP, so check link status too.
- */
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- dc->link_srv->set_dpms_off(pipe_ctx);
- else if (pipe_ctx->stream_res.audio)
- dc->hwss.disable_audio_stream(pipe_ctx);
-
- /* free acquired resources */
- if (pipe_ctx->stream_res.audio) {
- /*disable az_endpoint*/
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
- /*free audio*/
- if (dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
- pipe_ctx->stream_res.audio, false);
- pipe_ctx->stream_res.audio = NULL;
- }
+	/* DPMS may already be disabled, or the
+	 * dpms_off status may be incorrect due to the fastboot
+	 * feature: when the system resumes from S4 with only a second
+	 * screen, dpms_off would be true even though the
+	 * VBIOS lit up eDP, so check the link status too.
+	 */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
}
}
- else if (pipe_ctx->stream_res.dsc) {
- dc->link_srv->set_dsc_enable(pipe_ctx, false);
- }
/* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable
@@ -2576,28 +2586,6 @@ void dcn20_reset_hw_ctx_wrap(
}
}
-void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
-{
- struct mpc *mpc = dc->res_pool->mpc;
-
- // input to MPCC is always RGB, by default leave black_color at 0
- if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
- get_hdr_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
- get_surface_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
- get_mpctree_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
- get_surface_tile_visual_confirm_color(pipe_ctx, color);
- else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
- get_subvp_visual_confirm_color(dc, pipe_ctx, color);
-
- if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
- mpc->funcs->set_bg_color(mpc, color, mpcc_id);
- }
-}
-
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -2653,7 +2641,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
!pipe_ctx->update_flags.bits.mpcc) {
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
return;
}
@@ -2675,7 +2663,7 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
NULL,
hubp->inst,
mpcc_id);
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
ASSERT(new_mpcc != NULL);
hubp->opp_id = pipe_ctx->stream_res.opp->inst;
@@ -2733,6 +2721,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct dce_hwseq *hws = dc->hwseq;
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
@@ -2752,7 +2742,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- }
+ } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->enable_symclk_se)
+ dccg->funcs->enable_symclk_se(dccg,
+ stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 33a36c02b2f8..01901b08644c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -150,10 +150,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
const struct tg_color *solid_color,
int width, int height, int offset);
-void dcn20_update_visual_confirm_color(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color,
- int mpcc_id);
-
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 7c5817c426fa..e4b44e691ce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -102,7 +102,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -145,8 +145,4 @@ void dcn20_hw_sequencer_construct(struct dc *dc)
dc->hwss = dcn20_funcs;
dc->hwseq->funcs = dcn20_private_funcs;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwseq->funcs.init_pipes = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index a08c335b7383..58bdbd859bf9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -38,8 +38,12 @@
optc1->tg_shift->field_name, optc1->tg_mask->field_name
/**
- * Enable CRTC
- * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ * optc2_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
+ *
+ * @optc: timing_generator instance.
+ *
+ * Return: true if the CRTC was enabled successfully.
+ *
*/
bool optc2_enable_crtc(struct timing_generator *optc)
{
@@ -73,15 +77,18 @@ bool optc2_enable_crtc(struct timing_generator *optc)
}
/**
- *For the below, I'm not sure how your GSL parameters are stored in your env,
- * so I will assume a gsl_params struct for now
+ * optc2_set_gsl() - Assign the OTG to a GSL group and
+ * set one of the OTGs as master; the rest are slaves.
+ *
+ * @optc: timing_generator instance.
+ * @params: pointer to gsl_params
*/
void optc2_set_gsl(struct timing_generator *optc,
const struct gsl_params *params)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
-/**
+/*
* There are (MAX_OPTC+1)/2 gsl groups available for use.
* In each group (assign an OTG to a group by setting OTG_GSLX_EN = 1,
* set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest are slaves.
@@ -391,10 +398,9 @@ void optc2_triplebuffer_lock(struct timing_generator *optc)
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
}
void optc2_triplebuffer_unlock(struct timing_generator *optc)
@@ -456,6 +462,16 @@ void optc2_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* Set the min/max selectors unconditionally so that
+ * DMCUB fw may change OTG timings when necessary
+ * TODO: Remove the w/a after fixing the issue in DMCUB firmware
+ */
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 21,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 1d8c5805ef20..d587f807dfd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -722,22 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
-};
-
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = false,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = true,
- .scl_reset_length10 = true,
- .underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_tri_buf = true,
+ .enable_legacy_fast_update = true,
};
void dcn20_dpp_destroy(struct dpp **dpp)
@@ -1066,13 +1051,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn20_hwseq_create,
-};
-
static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
void dcn20_clock_source_destroy(struct clock_source **clk_src)
@@ -1316,7 +1294,7 @@ static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
{
enum dc_status status = DC_OK;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+ struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1970,7 +1948,7 @@ int dcn20_validate_apply_pipe_split_flags(
v->ODMCombineEnablePerState[vlevel][pipe_plane];
if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- if (get_num_mpc_splits(pipe) == 1) {
+ if (resource_get_num_mpc_splits(pipe) == 1) {
/*If need split for mpc but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 MPC */
@@ -1978,7 +1956,7 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = 0; /* 2 -> 2 MPC */
else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 2 -> 1 MPC */
- } else if (get_num_mpc_splits(pipe) == 3) {
+ } else if (resource_get_num_mpc_splits(pipe) == 3) {
/*If need split for mpc but 4 way split already*/
if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
|| !pipe->bottom_pipe)) {
@@ -1987,7 +1965,7 @@ int dcn20_validate_apply_pipe_split_flags(
pipe->top_pipe->plane_state == pipe->plane_state)
merge[i] = true; /* 4 -> 1 MPC */
split[i] = 0;
- } else if (get_num_odm_splits(pipe)) {
+ } else if (resource_get_num_odm_splits(pipe)) {
/* ODM -> MPC transition */
if (pipe->prev_odm_pipe) {
split[i] = 0;
@@ -1995,7 +1973,7 @@ int dcn20_validate_apply_pipe_split_flags(
}
}
} else {
- if (get_num_odm_splits(pipe) == 1) {
+ if (resource_get_num_odm_splits(pipe) == 1) {
/*If need split for odm but 2 way split already*/
if (split[i] == 4)
split[i] = 2; /* 2 -> 4 ODM */
@@ -2005,7 +1983,7 @@ int dcn20_validate_apply_pipe_split_flags(
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* exit ODM */
}
- } else if (get_num_odm_splits(pipe) == 3) {
+ } else if (resource_get_num_odm_splits(pipe) == 3) {
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
@@ -2015,7 +1993,7 @@ int dcn20_validate_apply_pipe_split_flags(
merge[i] = true; /* exit ODM */
}
split[i] = 0;
- } else if (get_num_mpc_splits(pipe)) {
+ } else if (resource_get_num_mpc_splits(pipe)) {
/* MPC -> ODM transition */
ASSERT(0); /* NOT expected yet */
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
@@ -2169,31 +2147,31 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
return voltage_supported;
}
-struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head)
{
- struct resource_context *res_ctx = &state->res_ctx;
- struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream);
+ struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master);
- if (!head_pipe)
- ASSERT(0);
+ ASSERT(otg_master);
- if (!idle_pipe)
+ if (!sec_dpp_pipe)
return NULL;
- idle_pipe->stream = head_pipe->stream;
- idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
- idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+ sec_dpp_pipe->stream = opp_head->stream;
+ sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg;
+ sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp;
- idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+ sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx];
+ sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst;
- return idle_pipe;
+ return sec_dpp_pipe;
}
bool dcn20_get_dcc_compression_cap(const struct dc *dc,
@@ -2238,7 +2216,7 @@ static const struct resource_funcs dcn20_res_pool_funcs = {
.link_enc_create = dcn20_link_encoder_create,
.panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -2488,15 +2466,9 @@ static bool dcn20_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
- if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- pool->base.pipe_count = 4;
- pool->base.mpcc_count = pool->base.pipe_count;
- dc->debug = debug_defaults_diags;
- } else {
- dc->debug = debug_defaults_diags;
- }
+
//dcn2.0x
dc->work_arounds.dedcn20_305_wa = true;
@@ -2734,9 +2706,8 @@ static bool dcn20_resource_construct(
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
dcn20_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index da0241e8c255..6d1a8924e57b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -58,10 +58,11 @@ unsigned int dcn20_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream);
+ const struct pipe_ctx *opp_head_pipe);
struct stream_encoder *dcn20_stream_encoder_create(
enum engine_id eng_id,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 1aeb04fbd89d..9e027db6d752 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -231,52 +231,39 @@ void dcn201_init_hw(struct dc *dc)
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
- REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
-
- hws->funcs.dccg_init(hws);
-
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
- } else {
- hws->funcs.bios_golden_init(dc);
-
- if (dc->ctx->dc_bios->fw_info_valid) {
- res_pool->ref_clocks.xtalin_clock_inKhz =
- dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (res_pool->dccg && res_pool->hubbub) {
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
-
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- res_pool->ref_clocks.dccg_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- }
- }
- } else
- ASSERT_CRITICAL(false);
- for (i = 0; i < dc->link_count; i++) {
- /* Power up AND update implementation according to the
- * required signal (which may be different from the
- * default signal on connector).
- */
- struct dc_link *link = dc->links[i];
-
- link->link_enc->funcs->hw_init(link->link_enc);
+ hws->funcs.bios_golden_init(dc);
+
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
+
+ if (res_pool->dccg && res_pool->hubbub) {
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
}
- if (hws->fb_offset.quad_part == 0)
- read_mmhub_vm_setup(hws);
+ } else
+ ASSERT_CRITICAL(false);
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
}
+ if (hws->fb_offset.quad_part == 0)
+ read_mmhub_vm_setup(hws);
/* Blank pixel data with OPP DPG */
for (i = 0; i < res_pool->timing_generator_count; i++) {
@@ -362,10 +349,6 @@ void dcn201_init_hw(struct dc *dc)
tg->funcs->tg_init(tg);
}
- /* end of FPGA. Below if real ASIC */
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- return;
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -496,7 +479,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
/* If there is no full update, don't need to touch MPC tree*/
if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
return;
}
@@ -521,7 +504,7 @@ void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
dc->res_pool->mpc, mpcc_id);
/* Call MPC to insert new plane */
- dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
mpc_tree_params,
&blnd_cfg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
index 9c16633e473a..92dd4cddbab8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
@@ -91,7 +91,7 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn201_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
index 730875dfd8b4..70fcbec03fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
@@ -55,10 +55,9 @@ static void optc201_triplebuffer_lock(struct timing_generator *optc)
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
}
static void optc201_triplebuffer_unlock(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index 6ea70da28aaa..2dc4d2c1410b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -613,6 +613,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = false,
+ .enable_legacy_fast_update = true,
};
static void dcn201_dpp_destroy(struct dpp **dpp)
@@ -896,13 +897,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn201_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn201_hwseq_create,
-};
-
static void dcn201_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@@ -998,14 +992,15 @@ static struct hubp *dcn201_hubp_create(
return NULL;
}
-static struct pipe_ctx *dcn201_acquire_idle_pipe_for_layer(
- struct dc_state *context,
+static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream)
+ const struct pipe_ctx *opp_head_pipe)
{
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+ struct resource_context *res_ctx = &new_ctx->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream);
+ struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe);
if (!head_pipe)
ASSERT(0);
@@ -1073,7 +1068,7 @@ static struct resource_funcs dcn201_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = NULL,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer,
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
@@ -1272,9 +1267,8 @@ static bool dcn201_resource_construct(
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
dcn201_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
index 33fc9aa8621b..d07c04458d31 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.c
@@ -43,7 +43,7 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
index e44a37491c1e..b7efa777ec73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
@@ -32,6 +32,5 @@ struct dccg *dccg21_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
-void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
#endif /* __DCN21_DCCG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 58e459c7e7d3..f976fac8dc3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -667,7 +667,6 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip
static void dmcub_PLAT_54186_wa(struct hubp *hubp,
struct surface_flip_registers *flip_regs)
{
- struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
union dmub_rb_cmd cmd;
@@ -690,11 +689,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp,
cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_execute(dmcub);
- PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
PERF_TRACE(); // TODO: remove after performance is stable.
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 2a182c2f57d6..43463d08f21b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -152,13 +152,28 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
+static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
@@ -173,8 +188,12 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
}
if (abm && panel_cntl) {
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
- panel_cntl->inst);
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
+ panel_cntl->inst);
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst);
+ }
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
}
@@ -191,18 +210,21 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
return;
}
- if (abm && panel_cntl)
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ if (abm && panel_cntl) {
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ }
+ }
}
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
- union dmub_rb_cmd cmd;
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
if (dc->dc->res_pool->dmcu) {
@@ -210,21 +232,23 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
return true;
}
- if (abm && panel_cntl)
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ if (abm != NULL) {
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
- cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
- cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
- cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
- cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst);
- cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+ if (abm && panel_cntl) {
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ }
+ }
+ }
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+ abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
+ frame_ramp, 0, panel_cntl->inst);
+ else
+ dmub_abm_set_backlight(dc, backlight_pwm_u16_16, frame_ramp, panel_cntl->inst);
return true;
}
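The ABM changes above all follow the same dispatch shape: prefer the ABM object's set_pipe_ex hook when present, otherwise fall back to the shared dmub_abm_set_pipe() helper. A standalone sketch of that pattern with illustrative stub types (not the real abm/abm_funcs definitions):

#include <stdbool.h>
#include <stdint.h>

struct abm_stub;

struct abm_funcs_stub {
        bool (*set_pipe_ex)(struct abm_stub *abm, uint32_t otg_inst,
                            uint32_t option, uint32_t panel_inst);
};

struct abm_stub {
        const struct abm_funcs_stub *funcs;
};

static bool fallback_set_pipe(struct abm_stub *abm, uint32_t otg_inst,
                              uint32_t option, uint32_t panel_inst)
{
        (void)abm; (void)otg_inst; (void)option; (void)panel_inst;
        return true;    /* would build and send the DMUB command */
}

bool abm_set_pipe(struct abm_stub *abm, uint32_t otg_inst,
                  uint32_t option, uint32_t panel_inst)
{
        if (abm->funcs && abm->funcs->set_pipe_ex)
                return abm->funcs->set_pipe_ex(abm, otg_inst, option, panel_inst);
        return fallback_set_pipe(abm, otg_inst, option, panel_inst);
}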
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index fe1a8e2e08ef..f024157bd6eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
@@ -151,8 +151,4 @@ void dcn21_hw_sequencer_construct(struct dc *dc)
dc->hwss = dcn21_funcs;
dc->hwseq->funcs = dcn21_private_funcs;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwseq->funcs.init_pipes = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 19aaa557b2db..d1a25fe6c44f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -653,28 +653,14 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
-};
-
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = false,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = true,
- .disable_48mhz_pwrdwn = true,
- .enable_tri_buf = true,
- .use_max_lb = true
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -869,8 +855,8 @@ bool dcn21_fast_validate_bw(struct dc *dc,
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
@@ -1219,13 +1205,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn21_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn21_hwseq_create,
-};
-
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
@@ -1409,7 +1388,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
@@ -1503,11 +1482,6 @@ static bool dcn21_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- pool->base.pipe_count = 4;
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
// Init the vm_helper
if (dc->vm_helper)
@@ -1721,9 +1695,8 @@ static bool dcn21_resource_construct(
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
dcn21_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index b7c2ae9ddfda..4a3e9e47b6b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -1,16 +1,16 @@
-#
+#
# Copyright 2020 Advanced Micro Devices, Inc.
-#
+#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
-#
+#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -18,17 +18,31 @@
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
-#
+#
# Authors: AMD
-#
-#
+#
+#
+
+DCN30 := \
+ dcn30_init.o \
+ dcn30_hubbub.o \
+ dcn30_hubp.o \
+ dcn30_dpp.o \
+ dcn30_optc.o \
+ dcn30_dccg.o \
+ dcn30_hwseq.o \
+ dcn30_mpc.o dcn30_vpg.o \
+ dcn30_afmt.o \
+ dcn30_dio_stream_encoder.o \
+ dcn30_dwb.o \
+ dcn30_dpp_cm.o \
+ dcn30_dwb_cm.o \
+ dcn30_cm_common.o \
+ dcn30_mmhubbub.o \
+ dcn30_resource.o \
+ dcn30_dio_link_encoder.o
-DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
- dcn30_dccg.o dcn30_hwseq.o dcn30_mpc.o dcn30_vpg.o \
- dcn30_afmt.o dcn30_dio_stream_encoder.o dcn30_dwb.o \
- dcn30_dpp_cm.o dcn30_dwb_cm.o dcn30_cm_common.o dcn30_mmhubbub.o \
- dcn30_dio_link_encoder.o dcn30_resource.o
AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 9d08127d209b..005dbe099a7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -436,6 +436,21 @@ void enc3_stream_encoder_update_dp_info_packets(
&info_frame->vsc,
true);
}
+	/* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU is on.
+	 * There should be another Infopacket type (e.g. vsc_psrsu) for PSR-SU.
+	 * In addition, currently the driver checks the valid bit, then updates and
+	 * sends the corresponding Infopacket. For PSR-SU, the SDP should only be sent
+	 * while entering PSR-SU mode. So we need another parameter (e.g. send)
+	 * in dc_info_packet to indicate which infopacket should be enabled by
+	 * default here.
+	 */
+ if (info_frame->vsc.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 1, /* packetIndex */
+ &info_frame->vsc,
+ true);
+ }
/* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU on.
* There should have another Infopacket type (e.g. vsc_psrsu) for PSR_SU.
* In addition, currently the driver check the valid bit then update and
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index e5b7ef7422b8..50dc83404644 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
int cur_rom_en = 0;
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
- color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
- cur_rom_en = 1;
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
+ }
REG_UPDATE_3(CURSOR0_CONTROL,
CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
index 6a3d3a0ec0a3..701c7d8bc038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func(
dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
if (dwb_ogam_lut) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(dwbc->ctx,
in_transfer_func_dwb_ogam,
dwb_ogam_lut, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index e46bbe7ddcc9..2861d974fcf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -449,6 +449,12 @@ void hubp3_read_state(struct hubp *hubp)
SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+ if (REG(UCLK_PSTATE_FORCE))
+ s->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
+
+ if (REG(DCHUBP_CNTL))
+ s->hubp_cntl = REG_READ(DCHUBP_CNTL);
+
}
void hubp3_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 8263a07f265f..6cef62d7a2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -106,7 +106,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
if (stream->func_shaper->type == TF_TYPE_HWPWL) {
shaper_lut = &stream->func_shaper->pwl;
} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(stream->func_shaper,
+ cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
@@ -330,10 +330,6 @@ void dcn30_enable_writeback(
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
- if (IS_DIAG_DC(dc->ctx->dce_environment)) {
- /*till diags switch to warmup interface*/
- dcn30_mmhubbub_warmup(dc, 1, wb_info);
- }
/* Update writeback pipe */
dcn30_set_writeback(dc, wb_info, context);
@@ -447,28 +443,6 @@ void dcn30_init_hw(struct dc *dc)
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-
- REG_WRITE(REFCLK_CNTL, 0);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
-
- //Enable ability to power gate / don't force power on permanently
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(hws, true);
-
- return;
- }
-
if (!dcb->funcs->is_accelerated_mode(dcb)) {
hws->funcs.bios_golden_init(dc);
hws->funcs.disable_vga(dc->hwseq);
@@ -487,27 +461,30 @@ void dcn30_init_hw(struct dc *dc)
REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
}
+ if (dc->debug.enable_mem_low_power.bits.vga) {
+ // Power down VGA memory
+ REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
+ }
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (res_pool->dccg && res_pool->hubbub) {
+ if (res_pool->dccg && res_pool->hubbub) {
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- // Not all ASICs have DCCG sw component
- res_pool->ref_clocks.dccg_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- }
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
@@ -632,7 +609,7 @@ void dcn30_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
// Get DMCUB capabilities
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
@@ -736,8 +713,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -859,9 +835,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.cursor_height = cursor_attr.height;
cmd.mall.cursor_pitch = cursor_attr.pitch;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Use copied cursor, and it's okay to not switch back */
cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
@@ -877,8 +851,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.tmr_scale = tmr_scale;
cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -895,9 +868,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.header.payload_bytes =
sizeof(cmd.mall) - sizeof(cmd.mall.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3216d10c58ba..3d19acaa12f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -106,7 +106,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported
};
@@ -151,8 +151,4 @@ void dcn30_hw_sequencer_construct(struct dc *dc)
dc->hwss = dcn30_funcs;
dc->hwseq->funcs = dcn30_private_funcs;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwseq->funcs.init_pipes = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index c95f000b63b2..5bf4d0aa6230 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -55,10 +55,9 @@ void optc3_triplebuffer_lock(struct timing_generator *optc)
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
- REG_WAIT(OTG_MASTER_UPDATE_LOCK,
- UPDATE_LOCK_STATUS, 1,
- 1, 10);
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);
}
@@ -216,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
optc1->opp_count = 1;
}
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -280,6 +279,9 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
* Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
* VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
*
+ * @optc: timing_generator instance.
+ * @enable: Enable DRR double buffering control if true, disable otherwise.
+ *
* Options: any time, start of frame, dp start of frame (range timing)
*/
static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
@@ -291,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
}
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -301,7 +303,12 @@ static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
- optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ struct dc *dc = optc->ctx->dc;
+
+ if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
+ dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
+ else
+ optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
}
void optc3_tg_init(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index fb06dc9a4893..d3a056c12b0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
void optc3_tg_init(struct timing_generator *optc);
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 67a34cda3774..88c0b24a3249 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -725,31 +725,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.use_max_lb = true,
- .exit_idle_opt_for_cursor_updates = true
-};
-
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true, //No dmcu on DCN30
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
+ .exit_idle_opt_for_cursor_updates = true,
+ .enable_legacy_fast_update = false,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
};
@@ -1076,13 +1060,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn30_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn30_hwseq_create,
-};
-
static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
{
unsigned int i;
@@ -1729,8 +1706,8 @@ noinline bool dcn30_internal_validate_bw(
/* We only support full screen mpo with ODM */
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
@@ -2011,11 +1988,10 @@ bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc,
if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context))
return false;
- // check if freesync enabled
if (!context->streams[0]->allow_freesync)
return false;
- if (context->streams[0]->vrr_active_variable)
+ if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming)
return false;
context->streams[0]->fpo_in_use = true;
@@ -2087,7 +2063,8 @@ bool dcn30_validate_bandwidth(struct dc *dc,
}
DC_FP_START();
- dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
BW_VAL_TRACE_END_WATERMARKS();
@@ -2239,7 +2216,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -2353,6 +2330,7 @@ static bool dcn30_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
/* read VBIOS LTTPR caps */
{
@@ -2376,10 +2354,7 @@ static bool dcn30_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2577,8 +2552,7 @@ static bool dcn30_resource_construct(
/* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
+ &res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 7aa628c21973..9002cb10a6ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -11,7 +11,8 @@
# Makefile for dcn30.
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
- dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+ dcn301_optc.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 6192851c59ed..257df8660b4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -107,7 +107,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
new file mode 100644
index 000000000000..b3cfcb887905
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn301_optc.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+#include "dc_dmub_srv.h"
+
+#include "dml/dcn30/dcn30_fpu.h"
+#include "dc_trace.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+
+/**
+ * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ * @optc: timing_generator instance.
+ * @params: parameters used for Dynamic Refresh Rate.
+ */
+void optc301_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ if (params->vertical_total_mid != 0) {
+
+ REG_SET(OTG_V_TOTAL_MID, 0,
+ OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+ REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+ OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+ OTG_VTOTAL_MID_FRAME_NUM,
+ (uint8_t)params->vertical_total_mid_frame_num);
+
+ }
+
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+ REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+ // Setup manual flow control for EOF via TRIG_A
+ optc->funcs->setup_manual_trigger(optc);
+
+ } else {
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
+ }
+}
+
+
+void optc301_setup_manual_trigger(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_8(OTG_TRIGA_CNTL, 0,
+ OTG_TRIGA_SOURCE_SELECT, 21,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+ OTG_TRIGA_POLARITY_SELECT, 0,
+ OTG_TRIGA_FREQUENCY_SELECT, 0,
+ OTG_TRIGA_DELAY, 0,
+ OTG_TRIGA_CLEAR, 1);
+}
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank_color = optc3_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .triplebuffer_lock = optc3_triplebuffer_lock,
+ .triplebuffer_unlock = optc2_triplebuffer_unlock,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc3_lock,
+ .unlock = optc1_unlock,
+ .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc301_set_drr,
+ .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .tg_init = optc3_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .setup_global_swap_lock = NULL,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
+ .set_dwb_source = NULL,
+ .set_odm_bypass = optc3_set_odm_bypass,
+ .set_odm_combine = optc3_set_odm_combine,
+ .get_optc_source = optc2_get_optc_source,
+ .set_out_mux = optc3_set_out_mux,
+ .set_drr_trigger_window = optc3_set_drr_trigger_window,
+ .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+ .set_gsl = optc2_set_gsl,
+ .set_gsl_source_select = optc2_set_gsl_source_select,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc301_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+ .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+};
+
+void dcn301_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn30_tg_funcs;
+
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 4;
+ optc1->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
new file mode 100644
index 000000000000..b49585682a15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN301_H__
+#define __DC_OPTC_DCN301_H__
+
+#include "dcn20/dcn20_optc.h"
+#include "dcn30/dcn30_optc.h"
+
+void dcn301_timing_generator_init(struct optc *optc1);
+void optc301_setup_manual_trigger(struct timing_generator *optc);
+void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
+#endif /* __DC_OPTC_DCN301_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5ac2a272c380..79d6697d13b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -42,7 +42,7 @@
#include "dcn30/dcn30_hubp.h"
#include "irq/dcn30/irq_service_dcn30.h"
#include "dcn30/dcn30_dpp.h"
-#include "dcn30/dcn30_optc.h"
+#include "dcn301/dcn301_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
@@ -702,23 +702,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.exit_idle_opt_for_cursor_updates = true
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = false,
- .disable_hubp_power_gate = false,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = true,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .use_max_lb = false,
-};
-
static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -872,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(
tgn10->tg_shift = &optc_shift;
tgn10->tg_mask = &optc_mask;
- dcn30_timing_generator_init(tgn10);
+ dcn301_timing_generator_init(tgn10);
return &tgn10->base;
}
@@ -1047,13 +1030,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn301_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn301_hwseq_create,
-};
-
static void dcn301_destruct(struct dcn301_resource_pool *pool)
{
unsigned int i;
@@ -1403,7 +1379,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1449,9 +1425,9 @@ static bool dcn301_resource_construct(
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
@@ -1513,10 +1489,7 @@ static bool dcn301_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -1710,9 +1683,8 @@ static bool dcn301_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
/* HW Sequencer and Plane caps */
dcn301_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 9f93c43115ba..447abcd593be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -95,31 +95,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.use_max_lb = true,
- .exit_idle_opt_for_cursor_updates = true
-};
-
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
+ .exit_idle_opt_for_cursor_updates = true,
+ .enable_legacy_fast_update = false,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
};
@@ -954,13 +938,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn302_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn302_hwseq_create,
-};
-
static bool is_soc_bounding_box_valid(struct dc *dc)
{
uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
@@ -1159,7 +1136,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1252,6 +1229,7 @@ static bool dcn302_resource_construct(
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -1309,8 +1287,6 @@ static bool dcn302_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else
- dc->debug = debug_defaults_diags;
// Init the vm_helper
if (dc->vm_helper)
@@ -1489,8 +1465,7 @@ static bool dcn302_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, pool,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
+ &res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 7f72ef882ca4..adf4989177f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -81,27 +81,11 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_idle_power_optimizations = false,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
-};
-
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
};
@@ -881,13 +865,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn303_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hwseq = dcn303_hwseq_create,
-};
-
static bool is_soc_bounding_box_valid(struct dc *dc)
{
uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
@@ -1085,7 +1062,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1176,6 +1153,7 @@ static bool dcn303_resource_construct(
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -1213,6 +1191,7 @@ static bool dcn303_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->config.dc_mode_clk_limit_support = true;
/* read VBIOS LTTPR caps */
if (ctx->dc_bios->funcs->get_lttpr_caps) {
enum bp_result bp_query_result;
@@ -1232,8 +1211,6 @@ static bool dcn303_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else
- dc->debug = debug_defaults_diags;
// Init the vm_helper
if (dc->vm_helper)
@@ -1400,8 +1377,7 @@ static bool dcn303_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, pool,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
+ &res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 4c2fdfea162f..8664f0c4c9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -47,6 +47,14 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ if (dccg->dpp_clock_gated[dpp_inst]) {
+ /*
+ * Do not update the DPPCLK DTO if the clock is stopped.
+ * It is treated the same as if the pipe itself were in PG.
+ */
+ return;
+ }
+
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
int modulo, phase;
@@ -76,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
struct dcn_dccg *dccg_dcn,
enum phyd32clk_clock_source src)
{
- if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (src == PHYD32CLKC)
src = PHYD32CLKF;
if (src == PHYD32CLKD)
@@ -276,19 +285,11 @@ void dccg31_enable_symclk32_le(
/* select one of the PHYD32CLKs as the source for symclk32_le */
switch (hpo_le_inst) {
case 0:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE0_GATE_DISABLE, 1,
- SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, phyd32clk,
SYMCLK32_LE0_EN, 1);
break;
case 1:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE1_GATE_DISABLE, 1,
- SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, phyd32clk,
SYMCLK32_LE1_EN, 1);
@@ -311,19 +312,38 @@ void dccg31_disable_symclk32_le(
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, 0,
SYMCLK32_LE0_EN, 0);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE0_GATE_DISABLE, 0,
- SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, 0,
SYMCLK32_LE1_EN, 0);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
- REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
- SYMCLK32_LE1_GATE_DISABLE, 0,
- SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ return;
+
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE0_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, enable ? 1 : 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_LE1_GATE_DISABLE, enable ? 1 : 0,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, enable ? 1 : 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -652,10 +672,8 @@ void dccg31_init(struct dccg *dccg)
dccg31_disable_symclk32_se(dccg, 2);
dccg31_disable_symclk32_se(dccg, 3);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
- dccg31_disable_symclk32_le(dccg, 0);
- dccg31_disable_symclk32_le(dccg, 1);
- }
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 0, false);
+ dccg31_set_symclk32_le_root_clock_gating(dccg, 1, false);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
dccg31_disable_dpstreamclk(dccg, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index 0902ce5eb8a1..e3caaacf7493 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -179,6 +179,11 @@ void dccg31_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
+void dccg31_set_symclk32_le_root_clock_gating(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 745a5d187a98..4596f3bac1b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -117,7 +117,6 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
memset(cmd, 0, sizeof(*cmd));
cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
@@ -126,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
- if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd))
+ if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return false;
return true;
@@ -425,7 +424,6 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
struct dmub_cmd_dig_dpia_control_data *dpia_control)
{
union dmub_rb_cmd cmd;
- struct dc_dmub_srv *dmub = dc_ctx->dmub_srv;
memset(&cmd, 0, sizeof(cmd));
@@ -438,9 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
cmd.dig1_dpia_control.dpia_control = *dpia_control;
- dc_dmub_srv_cmd_queue(dmub, &cmd);
- dc_dmub_srv_cmd_execute(dmub);
- dc_dmub_srv_wait_idle(dmub);
+ dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -562,7 +558,7 @@ void dcn31_link_encoder_disable_output(
struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
struct dc_link *link;
- if (!dcn10_is_dig_enabled(enc))
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
return;
link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 0278bae50a9d..45143459eedd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -154,7 +154,7 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
VID_STREAM_STATUS, 0,
10, 5000);
- /* Disable SDP tranmission */
+ /* Disable SDP transmission */
REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
SDP_STREAM_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 7e7cd5b64e6a..1f4e0b6261ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
+ DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
/* Should never be hit, if it is we have an erroneous hw config*/
ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
@@ -1017,8 +1018,8 @@ void hubbub31_init(struct hubbub *hubbub)
/*done in hwseq*/
/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
- DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
- DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 62ce36c75c4d..2a7f47642a44 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -117,28 +117,6 @@ void dcn31_init_hw(struct dc *dc)
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-
- REG_WRITE(REFCLK_CNTL, 0);
- REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(DIO_MEM_PWR_CTRL, 0);
-
- if (!dc->debug.disable_clock_gate) {
- /* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
- }
-
- //Enable ability to power gate / don't force power on permanently
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(hws, true);
-
- return;
- }
-
if (!dcb->funcs->is_accelerated_mode(dcb)) {
hws->funcs.bios_golden_init(dc);
if (hws->funcs.disable_vga)
@@ -154,23 +132,21 @@ void dcn31_init_hw(struct dc *dc)
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- if (res_pool->dccg && res_pool->hubbub) {
-
- (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
- dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
- &res_pool->ref_clocks.dccg_ref_clock_inKhz);
-
- (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
- res_pool->ref_clocks.dccg_ref_clock_inKhz,
- &res_pool->ref_clocks.dchub_ref_clock_inKhz);
- } else {
- // Not all ASICs have DCCG sw component
- res_pool->ref_clocks.dccg_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- res_pool->ref_clocks.dchub_ref_clock_inKhz =
- res_pool->ref_clocks.xtalin_clock_inKhz;
- }
+ if (res_pool->dccg && res_pool->hubbub) {
+
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
}
} else
ASSERT_CRITICAL(false);
@@ -197,10 +173,6 @@ void dcn31_init_hw(struct dc *dc)
}
}
- /* Enables outbox notifications for usb4 dpia */
- if (dc->res_pool->usb4_dpia_count)
- dmub_enable_outbox_notification(dc->ctx->dmub_srv);
-
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
@@ -297,8 +269,9 @@ void dcn31_init_hw(struct dc *dc)
#endif
// Get DMCUB capabilities
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
void dcn31_dsc_pg_control(
@@ -442,9 +415,7 @@ void dcn31_z10_save_init(struct dc *dc)
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn31_z10_restore(const struct dc *dc)
@@ -462,9 +433,7 @@ void dcn31_z10_restore(const struct dc *dc)
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
@@ -560,35 +529,31 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- link = pipe_ctx->stream->link;
- /* DPMS may already disable or */
- /* dpms_off status is incorrect due to fastboot
- * feature. When system resume from S4 with second
- * screen only, the dpms_off would be true but
- * VBIOS lit up eDP, so check link status too.
- */
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- dc->link_srv->set_dpms_off(pipe_ctx);
- else if (pipe_ctx->stream_res.audio)
- dc->hwss.disable_audio_stream(pipe_ctx);
-
- /* free acquired resources */
- if (pipe_ctx->stream_res.audio) {
- /*disable az_endpoint*/
- pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
- /*free audio*/
- if (dc->caps.dynamic_audio == true) {
- /*we have to dynamic arbitrate the audio endpoints*/
- /*we free the resource, need reset is_audio_acquired*/
- update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
- pipe_ctx->stream_res.audio, false);
- pipe_ctx->stream_res.audio = NULL;
- }
+ link = pipe_ctx->stream->link;
+ /* DPMS may already disable or */
+ /* dpms_off status is incorrect due to fastboot
+ * feature. When system resume from S4 with second
+ * screen only, the dpms_off would be true but
+ * VBIOS lit up eDP, so check link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
}
- } else if (pipe_ctx->stream_res.dsc) {
- dc->link_srv->set_dsc_enable(pipe_ctx, false);
}
pipe_ctx->stream = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 3a32810bbe38..fc25cc300a17 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -58,6 +58,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
@@ -109,7 +110,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
@@ -153,8 +154,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss = dcn31_funcs;
dc->hwseq->funcs = dcn31_private_funcs;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwseq->funcs.init_pipes = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 11ea9d13e312..217acd4e292a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
- return dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd);
+ return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}
static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
@@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
cmd.panel_cntl.data.bl_pwm_ref_div2 =
panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
- if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+ if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return 0;
panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index ff8cd5076434..82de4fe2637f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -96,6 +96,7 @@
#include "dce/dmub_psr.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dce/dmub_replay.h"
#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
@@ -887,32 +888,16 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
+ .enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
-};
-
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1341,13 +1326,6 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
- /* DCN3.1 FPGA Workaround
- * Need to enable HPO DP Stream Encoder before setting OTG master enable.
- * To do so, move calling function enable_stream_timing to only be done AFTER calling
- * function core_link_enable_stream
- */
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- hws->wa.dp_hpo_and_otg_sequence = true;
}
return hws;
}
@@ -1360,15 +1338,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn31_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
- .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
- .create_hwseq = dcn31_hwseq_create,
-};
-
static void dcn31_resource_destruct(struct dcn31_resource_pool *pool)
{
unsigned int i;
@@ -1512,6 +1481,9 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
@@ -1809,8 +1781,8 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
-
- dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1851,7 +1823,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn31_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1988,10 +1960,7 @@ static bool dcn31_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2121,6 +2090,14 @@ static bool dcn31_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
@@ -2195,9 +2172,8 @@ static bool dcn31_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
/* HW Sequencer and Plane caps */
dcn31_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index de7bfba2c179..ad3f019a784f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,16 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_trigger_dio_fifo_resync(
+ struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t dispclk_rdivider_value = 0;
+
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+}
+
static void dccg314_get_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -322,6 +332,9 @@ static void dccg314_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
+ return;
+
if (clock_on) {
/* turn off the DTO and leave phase/modulo at max */
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
@@ -335,6 +348,8 @@ static void dccg314_dpp_root_clock_control(
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
}
+
+ dccg->dpp_clock_gated[dpp_inst] = !clock_on;
}
static const struct dccg_funcs dccg314_funcs = {
@@ -347,6 +362,7 @@ static const struct dccg_funcs dccg314_funcs = {
.disable_symclk32_se = dccg31_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
+ .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
.set_physymclk = dccg31_set_physymclk,
.set_dtbclk_dto = dccg314_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
@@ -357,6 +373,7 @@ static const struct dccg_funcs dccg314_funcs = {
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
.set_pixel_rate_div = dccg314_set_pixel_rate_div,
+ .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
index 90687a9e8fdd..8e07d3151f91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
@@ -192,7 +192,10 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
- DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh)
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh)
struct dccg *dccg314_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index cc3fe9cac5b5..4d2820ffe468 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -390,6 +390,35 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
pix_per_cycle);
}
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe = NULL;
+ bool otg_disabled[MAX_PIPES] = {false};
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
+ reset_sync_context_for_pipe(dc, context, i);
+ otg_disabled[i] = true;
+ }
+ }
+
+ hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (otg_disabled[i])
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
+}
+
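/*
 * Rough shape of the sequence implemented above: every OTG master pipe whose
 * stream is powered down (dpms_off) or virtual is temporarily disabled and
 * has its sync context reset, the DCCG-level DIO FIFO resync is triggered
 * once, and the previously disabled OTGs are re-enabled. The helper is wired
 * up as the resync_fifo_dccg_dio private hwseq hook in dcn314_init.c (see the
 * dcn314_private_funcs table later in this patch).
 */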
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
@@ -400,29 +429,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
}
-void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
-{
- struct dc_context *ctx = hws->ctx;
- union dmub_rb_cmd cmd;
-
- if (hws->ctx->dc->debug.disable_hubp_power_gate)
- return;
-
- PERF_TRACE();
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.domain_control.header.type = DMUB_CMD__VBIOS;
- cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
- cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);
- cmd.domain_control.data.inst = hubp_inst;
- cmd.domain_control.data.power_gate = !power_on;
-
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
-
- PERF_TRACE();
-}
static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
/* There are use cases where SYMCLK is referenced by OTG. For instance
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index 6d0b62503caa..eafcc4ea6d24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -41,7 +41,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
-void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
+void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index a588f46b166f..ca8fe55c33b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -60,6 +60,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
@@ -111,7 +112,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
@@ -138,7 +139,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
- .hubp_pg_control = dcn314_hubp_pg_control,
+ .hubp_pg_control = dcn31_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
@@ -151,6 +152,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
+ .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
@@ -158,8 +160,4 @@ void dcn314_hw_sequencer_construct(struct dc *dc)
dc->hwss = dcn314_funcs;
dc->hwseq->funcs = dcn314_private_funcs;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwseq->funcs.init_pipes = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index abeeede38fb3..1c1fb2fa0822 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -93,6 +93,7 @@
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn314/display_mode_vba_314.h"
@@ -117,23 +118,6 @@
#define regBIF_BX2_BIOS_SCRATCH_6 0x003e
#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0, 0, 0 } } } };
-
-
#define DC_LOGGER_INIT(logger)
enum dcn31_clk_src_array_id {
@@ -887,12 +871,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.psr_skip_crtc_disable = true,
+ .replay_skip_crtc_disabled = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
+ .disable_dpp_power_gate = false,
+ .disable_hubp_power_gate = false,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
@@ -921,6 +906,22 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
+
+ .root_clock_optimization = {
+ .bits = {
+ .dpp = true,
+ .dsc = true,
+ .hdmistream = true,
+ .hdmichar = true,
+ .dpstream = true,
+ .symclk32_se = true,
+ .symclk32_le = true,
+ .symclk_fe = true,
+ .physymclk = true,
+ .dpiasymclk = true,
+ }
+ },
+
.seamless_boot_odm_combine = true
};
@@ -946,6 +947,7 @@ static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1375,13 +1377,6 @@ static struct dce_hwseq *dcn314_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
- /* DCN3.1 FPGA Workaround
- * Need to enable HPO DP Stream Encoder before setting OTG master enable.
- * To do so, move calling function enable_stream_timing to only be done AFTER calling
- * function core_link_enable_stream
- */
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- hws->wa.dp_hpo_and_otg_sequence = true;
}
return hws;
}
@@ -1394,15 +1389,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn314_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
- .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
- .create_hwseq = dcn314_hwseq_create,
-};
-
static void dcn314_resource_destruct(struct dcn314_resource_pool *pool)
{
unsigned int i;
@@ -1545,6 +1531,9 @@ static void dcn314_resource_destruct(struct dcn314_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
@@ -1700,7 +1689,9 @@ static bool filter_modes_for_single_channel_workaround(struct dc *dc,
struct dc_state *context)
{
// Filter out timing combinations at or above 2K@240Hz + 8K@24fps if memory only has a single LPDDR DIMM
- if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
+ if (dc->clk_mgr->bw_params->vram_type == 34 &&
+ dc->clk_mgr->bw_params->num_channels < 2 &&
+ context->stream_count > 1) {
int total_phy_pix_clk = 0;
for (int i = 0; i < context->stream_count; i++)
@@ -1749,8 +1740,8 @@ bool dcn314_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
-
- dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ if (dc->res_pool->funcs->calculate_wm_and_dlg)
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1782,7 +1773,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1920,6 +1911,14 @@ static bool dcn314_resource_construct(
dc->debug = debug_defaults_drv;
else
dc->debug = debug_defaults_diags;
+
+ /* Disable pipe power gating */
+ dc->debug.disable_dpp_power_gate = true;
+ dc->debug.disable_hubp_power_gate = true;
+
+ /* Disable root clock optimization */
+ dc->debug.root_clock_optimization.u32All = 0;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2034,6 +2033,14 @@ static bool dcn314_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
@@ -2101,8 +2108,7 @@ static bool dcn314_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
+ &res_create_funcs))
goto create_fail;
/* HW Sequencer and Plane caps */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 41c972c8eb19..127487ea3d7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -136,6 +136,9 @@
#define DCN3_15_MAX_DET_SIZE 384
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
+#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
+/* A minimum of 2 extra segments must be in the compbuf and claimable to guarantee seamless mpo transitions */
+#define MIN_RESERVED_DET_SEGS 2
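/*
 * With the 384 KB maximum DET and 64 KB segment size defined above,
 * DCN3_15_MAX_DET_SEGS works out to 384 / 64 = 6 segments for a single pipe,
 * or 2 * 6 = 12 segments when a plane is split across two pipes.
 */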
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
@@ -884,31 +887,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
+ .enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
-};
-
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1339,13 +1326,6 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
- /* DCN3.1 FPGA Workaround
- * Need to enable HPO DP Stream Encoder before setting OTG master enable.
- * To do so, move calling function enable_stream_timing to only be done AFTER calling
- * function core_link_enable_stream
- */
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- hws->wa.dp_hpo_and_otg_sequence = true;
}
return hws;
}
@@ -1358,15 +1338,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn31_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
- .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
- .create_hwseq = dcn31_hwseq_create,
-};
-
static void dcn315_resource_destruct(struct dcn315_resource_pool *pool)
{
unsigned int i;
@@ -1636,21 +1607,69 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
+{
+ if (SourcePixelFormat == dm_444_64)
+ return 8;
+ else if (SourcePixelFormat == dm_444_16)
+ return 2;
+ else if (SourcePixelFormat == dm_444_8)
+ return 1;
+ else if (SourcePixelFormat == dm_rgbe_alpha)
+ return 5;
+ else if (SourcePixelFormat == dm_420_8)
+ return 3;
+ else if (SourcePixelFormat == dm_420_12)
+ return 6;
+ else
+ return 4;
+}
+
+static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ /*Don't apply for single stream*/
+ if (context->stream_count < 2)
+ return false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ /*Don't apply if scaling*/
+ if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width ||
+ res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height ||
+ (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->src_rect.width
+ != res_ctx->pipe_ctx[i].plane_state->dst_rect.width ||
+ res_ctx->pipe_ctx[i].plane_state->src_rect.height
+ != res_ctx->pipe_ctx[i].plane_state->dst_rect.height)))
+ return false;
+ /*Don't apply if MPO to avoid transition issues*/
+ if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
+ return false;
+ }
+ return true;
+}
+
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
+ int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
+ int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
+ bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
if (!res_ctx->pipe_ctx[i].stream)
@@ -1671,6 +1690,28 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
+ /* Ceil to crb segment size */
+ int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
+ &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
+
+ if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
+ bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
+ split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
+ split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ /* Minimum 2 segments to allow mpc/odm combine if it's used later */
+ if (approx_det_segs_required_for_pstate < 2)
+ approx_det_segs_required_for_pstate = 2;
+ if (split_required)
+ approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
+ remaining_det_segs -= approx_det_segs_required_for_pstate;
+ } else
+ remaining_det_segs = -1;
+ crb_pipes++;
+ }
DC_FP_END();
if (pipes[pipe_cnt].dout.dsc_enable) {
@@ -1689,16 +1730,55 @@ static int dcn315_populate_dml_pipes_from_context(
break;
}
}
-
pipe_cnt++;
}
+ /* Spread remaining unreserved crb evenly among all pipes*/
+ if (pixel_rate_crb) {
+ for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &res_ctx->pipe_ctx[i];
+ if (!pipe->stream)
+ continue;
+
+ /* Do not use asymmetric crb if not enough for pstate support */
+ if (remaining_det_segs < 0) {
+ pipes[pipe_cnt].pipe.src.det_size_override = 0;
+ pipe_cnt++;
+ continue;
+ }
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+ /* Clamp to 2 pipe split max det segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+ }
+ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+ /* If we are splitting we must have an even number of segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ }
+ /* Convert segments into size for DML use */
+ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+
+ crb_idx++;
+ }
+ pipe_cnt++;
+ }
+ }
+
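/*
 * A worked example of the spread above (numbers chosen for illustration):
 * with remaining_det_segs = 9, crb_pipes = 2 and MIN_RESERVED_DET_SEGS = 2,
 * each pipe receives (9 - 2) / 2 = 3 extra segments and the first pipe picks
 * up the (9 - 2) % 2 = 1 leftover, so the overrides grow by 4 and 3 segments
 * respectively before being clamped, evened out for splits, and finally
 * multiplied by DCN3_15_CRB_SEGMENT_SIZE_KB for DML.
 */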
if (pipe_cnt)
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
- ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE);
+
dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
@@ -1738,7 +1818,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1845,10 +1925,7 @@ static bool dcn315_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2029,9 +2106,8 @@ static bool dcn315_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
/* HW Sequencer and Plane caps */
dcn31_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index 9ead347a33e9..5fe2c61527df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -884,30 +884,14 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
-};
-
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1340,13 +1324,6 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
- /* DCN3.1 FPGA Workaround
- * Need to enable HPO DP Stream Encoder before setting OTG master enable.
- * To do so, move calling function enable_stream_timing to only be done AFTER calling
- * function core_link_enable_stream
- */
- if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- hws->wa.dp_hpo_and_otg_sequence = true;
}
return hws;
}
@@ -1359,15 +1336,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn31_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
- .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
- .create_hwseq = dcn31_hwseq_create,
-};
-
static void dcn316_resource_destruct(struct dcn316_resource_pool *pool)
{
unsigned int i;
@@ -1737,7 +1705,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn316_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1844,10 +1812,7 @@ static bool dcn316_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2028,9 +1993,8 @@ static bool dcn316_resource_construct(
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
/* HW Sequencer and Plane caps */
dcn31_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index ffbb739d85b6..921f58c0c729 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,18 +42,17 @@
#define DC_LOGGER \
dccg->ctx->logger
-/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV
- * without the probability of causing a DIG FIFO error.
- */
-static void dccg32_wait_for_dentist_change_done(
+static void dccg32_trigger_dio_fifo_resync(
struct dccg *dccg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t dispclk_rdivider_value = 0;
- uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+ /* Not valid for the WDIVIDER to be set to 0 */
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dccg32_get_pixel_rate_div(
@@ -124,29 +123,21 @@ static void dccg32_set_pixel_rate_div(
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG0_PIXEL_RATE_DIVK1, k1,
OTG0_PIXEL_RATE_DIVK2, k2);
-
- dccg32_wait_for_dentist_change_done(dccg);
break;
case 1:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG1_PIXEL_RATE_DIVK1, k1,
OTG1_PIXEL_RATE_DIVK2, k2);
-
- dccg32_wait_for_dentist_change_done(dccg);
break;
case 2:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG2_PIXEL_RATE_DIVK1, k1,
OTG2_PIXEL_RATE_DIVK2, k2);
-
- dccg32_wait_for_dentist_change_done(dccg);
break;
case 3:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
OTG3_PIXEL_RATE_DIVK1, k1,
OTG3_PIXEL_RATE_DIVK2, k2);
-
- dccg32_wait_for_dentist_change_done(dccg);
break;
default:
BREAK_TO_DEBUGGER();
@@ -290,7 +281,8 @@ static void dccg32_set_dpstreamclk(
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
/* set the dtbclk_p source */
- dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
+ /* always program refclk as DTBCLK. No use-case expected to require DPREFCLK as refclk */
+ dccg32_set_dtbclk_p_src(dccg, DTBCLK0, otg_inst);
/* enabled to select one of the DTBCLKs for pipe */
switch (dp_hpo_inst) {
@@ -352,6 +344,7 @@ static const struct dccg_funcs dccg32_funcs = {
.otg_add_pixel = dccg32_otg_add_pixel,
.otg_drop_pixel = dccg32_otg_drop_pixel,
.set_pixel_rate_div = dccg32_set_pixel_rate_div,
+ .trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync,
};
struct dccg *dccg32_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
index 8071ab98d708..cf5508718122 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
@@ -112,8 +112,9 @@
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
-
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh)
struct dccg *dccg32_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index eb08ccc38e79..8bfef6d095b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -42,8 +42,8 @@
hubbub2->shifts->field_name, hubbub2->masks->field_name
/**
- * @DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for
- * DCN32
+ * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for
+ * DCN32
*/
#define DCN32_CRB_SEGMENT_SIZE_KB 64
@@ -955,8 +955,8 @@ void hubbub32_init(struct hubbub *hubbub)
/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
- DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
- DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
}
/*
ignore the "df_pre_cstate_req" from the SDP port control.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2d604f7ee782..ca5b4b28a664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -179,6 +179,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_setup_interdependent = hubp2_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 1f5ee5cde6e1..680e7fa8d18a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -47,11 +47,9 @@
#include "clk_mgr.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
-#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32_resource.h"
#include "link.h"
-#include "dmub/inc/dmub_subvp_state.h"
#define DC_LOGGER_INIT(logger)
@@ -274,8 +272,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -309,8 +306,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
cmd.cab.cab_alloc_ways = ways;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -326,9 +322,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.payload_bytes =
sizeof(cmd.cab) - sizeof(cmd.cab.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -413,6 +407,30 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
}
}
+void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params)
+{
+ struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc;
+ bool lock = params->subvp_pipe_control_lock_fast_params.lock;
+ struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx;
+ bool subvp_immediate_flip = false;
+
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) {
+ if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN &&
+ pipe_ctx->plane_state->flip_immediate)
+ subvp_immediate_flip = true;
+ }
+
+ // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
+ if (subvp_immediate_flip) {
+ union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
+
+ hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
+ hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
+ hw_lock_cmd.bits.lock = lock;
+ hw_lock_cmd.bits.should_release = !lock;
+ dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
+ }
+}
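/*
 * Note on the fast-lock path above: the hardware lock is only taken when the
 * pipe is a SubVP main pipe with an immediate flip pending. The same INBOX0
 * command handles both directions, since 'lock' and 'should_release' are
 * programmed as opposites, so the expectation is that a caller invokes the
 * hook once with lock = true before the update and again with lock = false
 * afterwards.
 */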
bool dcn32_set_mpc_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
@@ -428,7 +446,7 @@ bool dcn32_set_mpc_shaper_3dlut(
if (stream->func_shaper->type == TF_TYPE_HWPWL)
shaper_lut = &stream->func_shaper->pwl;
else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(stream->ctx,
stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
@@ -464,7 +482,7 @@ bool dcn32_set_mcm_luts(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
&dpp_base->regamma_params, false);
lut_params = &dpp_base->regamma_params;
@@ -479,7 +497,7 @@ bool dcn32_set_mcm_luts(
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
// TODO: dpp_base replace
ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
+ cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
lut_params = &dpp_base->shaper_params;
@@ -549,7 +567,7 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
bool ret = false;
/* program OGAM or 3DLUT only for the top pipe*/
- if (pipe_ctx->top_pipe == NULL) {
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
/*program shaper and 3dlut in MPC*/
ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
@@ -587,8 +605,8 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (!pipe->stream || (pipe->stream && !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
- pipe->stream->fpo_in_use))) {
+ if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
}
@@ -596,7 +614,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
/* Today only FPO uses cursor P-State force. Only clear cursor P-State force
* if it's not FPO.
*/
- if (!pipe->stream || (pipe->stream && !pipe->stream->fpo_in_use)) {
+ if (!pipe->stream || !pipe->stream->fpo_in_use) {
if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false);
}
@@ -721,6 +739,9 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
+ clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+ clocks->fclk_p_state_change_support = true;
+ clocks->p_state_change_support = true;
if (dc->debug.disable_boot_optimizations) {
clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
} else {
@@ -730,9 +751,6 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
* freq to ensure that the timing is valid and unchanged.
*/
clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
- clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
- clocks->fclk_p_state_change_support = true;
- clocks->p_state_change_support = true;
}
dc->clk_mgr->funcs->update_clocks(
@@ -946,8 +964,10 @@ void dcn32_init_hw(struct dc *dc)
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
+ dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
}
@@ -1125,10 +1145,6 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
- // For phantom pipes, use the same programming as the main pipes
- if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- stream = pipe_ctx->stream->mall_stream_config.paired_stream;
- }
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1177,6 +1193,36 @@ void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
pix_per_cycle);
}
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe = NULL;
+ bool otg_disabled[MAX_PIPES] = {false};
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER))
+ continue;
+
+ if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
+ && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
+ reset_sync_context_for_pipe(dc, context, i);
+ otg_disabled[i] = true;
+ }
+ }
+
+ hws->ctx->dc->res_pool->dccg->funcs->trigger_dio_fifo_resync(hws->ctx->dc->res_pool->dccg);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (otg_disabled[i])
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
+}
+
void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
@@ -1253,7 +1299,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && pipe_ctx->stream->link == link) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
@@ -1336,7 +1382,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
{
phantom_pipe->update_flags.raw = 0;
if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (phantom_pipe->stream && phantom_pipe->plane_state) {
+ if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
phantom_pipe->update_flags.bits.enable = 1;
phantom_pipe->update_flags.bits.mpcc = 1;
phantom_pipe->update_flags.bits.dppclk = 1;
@@ -1346,7 +1392,7 @@ void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
phantom_pipe->update_flags.bits.scaler = 1;
phantom_pipe->update_flags.bits.viewport = 1;
phantom_pipe->update_flags.bits.det_size = 1;
- if (!phantom_pipe->top_pipe && !phantom_pipe->prev_odm_pipe) {
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
phantom_pipe->update_flags.bits.odm = 1;
phantom_pipe->update_flags.bits.global_sync = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 6694c1d14aa3..2d2628f31bed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -75,6 +75,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context);
+
void dcn32_subvp_pipe_control_lock(struct dc *dc,
struct dc_state *context,
bool lock,
@@ -82,6 +84,8 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
struct pipe_ctx *top_pipe_to_program,
bool subvp_prev_use);
+void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params);
+
void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 8085f2acb1a9..777b2fac20c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -56,6 +56,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
@@ -109,7 +110,8 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.commit_subvp_config = dcn32_commit_subvp_config,
.enable_phantom_streams = dcn32_enable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
- .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
@@ -153,6 +155,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+ .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
};
@@ -161,8 +164,4 @@ void dcn32_hw_sequencer_init_functions(struct dc *dc)
dc->hwss = dcn32_funcs;
dc->hwseq->funcs = dcn32_private_funcs;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- dc->hwss.init_hw = dcn20_fpga_init_hw;
- dc->hwseq->funcs.init_pipes = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index c8041cfd594d..3082da04a63d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -64,7 +64,7 @@ void mpc32_mpc_init(struct mpc *mpc)
}
}
-static void mpc32_power_on_blnd_lut(
+void mpc32_power_on_blnd_lut(
struct mpc *mpc,
uint32_t mpcc_id,
bool power_on)
@@ -120,7 +120,7 @@ static enum dc_lut_mode mpc32_get_post1dlut_current(struct mpc *mpc, uint32_t mp
return mode;
}
-static void mpc32_configure_post1dlut(
+void mpc32_configure_post1dlut(
struct mpc *mpc,
uint32_t mpcc_id,
bool is_ram_a)
@@ -163,7 +163,7 @@ static void mpc32_post1dlut_get_reg_field(
}
/*program blnd lut RAM A*/
-static void mpc32_program_post1dluta_settings(
+void mpc32_program_post1dluta_settings(
struct mpc *mpc,
uint32_t mpcc_id,
const struct pwl_params *params)
@@ -192,7 +192,7 @@ static void mpc32_program_post1dluta_settings(
}
/*program blnd lut RAM B*/
-static void mpc32_program_post1dlutb_settings(
+void mpc32_program_post1dlutb_settings(
struct mpc *mpc,
uint32_t mpcc_id,
const struct pwl_params *params)
@@ -220,7 +220,7 @@ static void mpc32_program_post1dlutb_settings(
cm_helper_program_gamcor_xfer_func(mpc->ctx, params, &gam_regs);
}
-static void mpc32_program_post1dlut_pwl(
+void mpc32_program_post1dlut_pwl(
struct mpc *mpc,
uint32_t mpcc_id,
const struct pwl_result_data *rgb,
@@ -321,7 +321,7 @@ static enum dc_lut_mode mpc32_get_shaper_current(struct mpc *mpc, uint32_t mpcc_
}
-static void mpc32_configure_shaper_lut(
+void mpc32_configure_shaper_lut(
struct mpc *mpc,
bool is_ram_a,
uint32_t mpcc_id)
@@ -336,7 +336,7 @@ static void mpc32_configure_shaper_lut(
}
-static void mpc32_program_shaper_luta_settings(
+void mpc32_program_shaper_luta_settings(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -486,7 +486,7 @@ static void mpc32_program_shaper_luta_settings(
}
-static void mpc32_program_shaper_lutb_settings(
+void mpc32_program_shaper_lutb_settings(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -637,7 +637,7 @@ static void mpc32_program_shaper_lutb_settings(
}
-static void mpc32_program_shaper_lut(
+void mpc32_program_shaper_lut(
struct mpc *mpc,
const struct pwl_result_data *rgb,
uint32_t num,
@@ -671,7 +671,7 @@ static void mpc32_program_shaper_lut(
}
-static void mpc32_power_on_shaper_3dlut(
+void mpc32_power_on_shaper_3dlut(
struct mpc *mpc,
uint32_t mpcc_id,
bool power_on)
@@ -789,7 +789,7 @@ static enum dc_lut_mode get3dlut_config(
}
-static void mpc32_select_3dlut_ram(
+void mpc32_select_3dlut_ram(
struct mpc *mpc,
enum dc_lut_mode mode,
bool is_color_channel_12bits,
@@ -803,7 +803,7 @@ static void mpc32_select_3dlut_ram(
}
-static void mpc32_select_3dlut_ram_mask(
+void mpc32_select_3dlut_ram_mask(
struct mpc *mpc,
uint32_t ram_selection_mask,
uint32_t mpcc_id)
@@ -816,7 +816,7 @@ static void mpc32_select_3dlut_ram_mask(
}
-static void mpc32_set3dlut_ram12(
+void mpc32_set3dlut_ram12(
struct mpc *mpc,
const struct dc_rgb *lut,
uint32_t entries,
@@ -848,7 +848,7 @@ static void mpc32_set3dlut_ram12(
}
-static void mpc32_set3dlut_ram10(
+void mpc32_set3dlut_ram10(
struct mpc *mpc,
const struct dc_rgb *lut,
uint32_t entries,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
index 2c2ecd053806..9ac584fa89ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.h
@@ -332,4 +332,65 @@ void dcn32_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc32_power_on_blnd_lut(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ bool power_on);
+void mpc32_program_post1dlut_pwl(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+void mpc32_program_post1dlutb_settings(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ const struct pwl_params *params);
+void mpc32_program_post1dluta_settings(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ const struct pwl_params *params);
+void mpc32_configure_post1dlut(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ bool is_ram_a);
+void mpc32_program_shaper_lut(
+ struct mpc *mpc,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ uint32_t mpcc_id);
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+void mpc32_program_shaper_luta_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id);
+void mpc32_configure_shaper_lut(
+ struct mpc *mpc,
+ bool is_ram_a,
+ uint32_t mpcc_id);
+void mpc32_power_on_shaper_3dlut(
+ struct mpc *mpc,
+ uint32_t mpcc_id,
+ bool power_on);
+void mpc32_set3dlut_ram10(
+ struct mpc *mpc,
+ const struct dc_rgb *lut,
+ uint32_t entries,
+ uint32_t mpcc_id);
+void mpc32_set3dlut_ram12(
+ struct mpc *mpc,
+ const struct dc_rgb *lut,
+ uint32_t entries,
+ uint32_t mpcc_id);
+void mpc32_select_3dlut_ram_mask(
+ struct mpc *mpc,
+ uint32_t ram_selection_mask,
+ uint32_t mpcc_id);
+void mpc32_select_3dlut_ram(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ uint32_t mpcc_id);
#endif //__DC_MPCC_DCN32_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index 2ee798965bc2..8abb94f60078 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -98,7 +98,7 @@ static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, i
optc1->opp_count = opp_cnt;
}
-static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
+void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -106,8 +106,11 @@ static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, b
OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0);
}
/**
- * Enable CRTC
- * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ * optc32_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
+ *
+ * @optc: timing_generator instance.
+ *
+ * Return: true if the CRTC is enabled.
*/
static bool optc32_enable_crtc(struct timing_generator *optc)
{
@@ -245,16 +248,9 @@ static void optc32_set_drr(
}
optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
- optc32_setup_manual_trigger(optc);
- } else {
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_SET_V_TOTAL_MIN_MASK, 0,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_FORCE_LOCK_ON_EVENT, 0);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
+
+ optc32_setup_manual_trigger(optc);
}
static struct timing_generator_funcs dcn32_tg_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
index b92ba8c75694..abf0121a1006 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
@@ -179,5 +179,6 @@
SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
void dcn32_timing_generator_init(struct optc *optc1);
+void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 22dd1ebea618..935cd23e6a01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -726,28 +726,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.override_dispclk_programming = true,
.disable_fpo_optimizations = false,
.fpo_vactive_margin_us = 2000, // 2000us
- .disable_fpo_vactive = true,
+ .disable_fpo_vactive = false,
.disable_boot_optimizations = false,
-};
-
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_dsc_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true,
- .force_disable_subvp = true
+ .disable_subvp_high_refresh = false,
+ .disable_dp_plus_plus_wa = true,
+ .fpo_vactive_min_active_margin_us = 200,
+ .fpo_vactive_max_blank_us = 1000,
+ .enable_legacy_fast_update = false,
};
static struct dce_aux *dcn32_aux_engine_create(
@@ -1353,15 +1338,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn32_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hpo_dp_stream_encoder = dcn32_hpo_dp_stream_encoder_create,
- .create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create,
- .create_hwseq = dcn32_hwseq_create,
-};
-
static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
{
unsigned int i;
@@ -1733,8 +1709,8 @@ void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe &&
- pipe->plane_state && pipe->stream &&
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_is_pipe_type(pipe, DPP_PIPE) &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
phantom_plane = pipe->plane_state;
phantom_stream = pipe->stream;
@@ -1888,6 +1864,8 @@ bool dcn32_validate_bandwidth(struct dc *dc,
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ dcn32_override_min_req_memclk(dc, context);
+
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
@@ -1914,7 +1892,7 @@ int dcn32_populate_dml_pipes_from_context(
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe;
+ struct pipe_ctx *pipe = NULL;
bool subvp_in_use = false;
struct dc_crtc_timing *timing;
bool vsr_odm_support = false;
@@ -2060,7 +2038,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -2198,6 +2176,7 @@ static bool dcn32_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.seamless_odm = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -2236,6 +2215,7 @@ static bool dcn32_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
+ dc->config.dc_mode_clk_limit_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2254,10 +2234,7 @@ static bool dcn32_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -2313,8 +2290,7 @@ static bool dcn32_resource_construct(
}
/* DML */
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
+ dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
/* IRQ Service */
init_data.ctx = dc->ctx;
@@ -2451,9 +2427,8 @@ static bool dcn32_resource_construct(
/* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
/* HW Sequencer init functions and Plane caps */
dcn32_hw_sequencer_init_functions(dc);
@@ -2510,109 +2485,115 @@ struct resource_pool *dcn32_create_resource_pool(
return NULL;
}
-static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
- struct resource_context *res_ctx,
+/*
+ * Find the optimal free pipe in res_ctx that could be used as a secondary dpp
+ * pipe for the given OPP head pipe.
+ *
+ * a free pipe - a pipe in the input res_ctx not yet used for any stream or
+ * plane.
+ * secondary dpp pipe - a pipe that gets inserted into a head OPP pipe's MPC
+ * blending tree. This is typically used for rendering MPO planes or additional
+ * offset areas in MPCC combine.
+ *
+ * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe
+ * -------------------------------------------------------------------------
+ *
+ * PROBLEM:
+ *
+ * 1. There is a hardware limitation that a secondary DPP pipe cannot be
+ * transferred from one MPC blending tree to the other in a single frame.
+ * Otherwise it could cause glitches on the screen.
+ *
+ * For instance, we cannot transition from state 1 to state 2 in one frame. This
+ * is because PIPE1 is transferred from PIPE0's MPC blending tree over to
+ * PIPE2's MPC blending tree, which is not supported by hardware.
+ * To support this transition we need to first remove PIPE1 from PIPE0's MPC
+ * blending tree in one frame and then insert PIPE1 into PIPE2's MPC blending tree
+ * in the next frame. This is not optimal as it will delay the flip for two
+ * frames.
+ *
+ * State 1:
+ * PIPE0 -- secondary DPP pipe --> (PIPE1)
+ * PIPE2 -- secondary DPP pipe --> NONE
+ *
+ * State 2:
+ * PIPE0 -- secondary DPP pipe --> NONE
+ * PIPE2 -- secondary DPP pipe --> (PIPE1)
+ *
+ * 2. In general we want to minimize unnecessary changes in pipe topology.
+ * If a pipe is already part of the current blending tree and the plane
+ * topology is unchanged, we don't want to swap it with another free pipe
+ * on every update. Powering a pipe up or down requires a full update,
+ * which delays the flip by one frame. Reusing the original pipe avoids
+ * toggling its power, so we can flip faster.
+ */
+static int find_optimal_free_pipe_as_secondary_dpp_pipe(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
const struct resource_pool *pool,
- const struct pipe_ctx *primary_pipe)
+ const struct pipe_ctx *new_opp_head)
{
- int i;
- struct pipe_ctx *secondary_pipe = NULL;
- struct pipe_ctx *next_odm_mpo_pipe = NULL;
- int primary_index, preferred_pipe_idx;
- struct pipe_ctx *old_primary_pipe = NULL;
+ const struct pipe_ctx *cur_opp_head;
+ int free_pipe_idx;
- /*
- * Modified from find_idle_secondary_pipe
- * With windowed MPO and ODM, we want to avoid the case where we want a
- * free pipe for the left side but the free pipe is being used on the
- * right side.
- * Add check on current_state if the primary_pipe is the left side,
- * to check the right side ( primary_pipe->next_odm_pipe ) to see if
- * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
- * - If so, then don't use this pipe
- * EXCEPTION - 3 plane ( 2 MPO plane ) case
- * - in this case, the primary pipe has already gotten a free pipe for the
- * MPO window in the left
- * - when it tries to get a free pipe for the MPO window on the right,
- * it will see that it is already assigned to the right side
- * ( primary_pipe->next_odm_pipe ). But in this case, we want this
- * free pipe, since it will be for the right side. So add an
- * additional condition, that skipping the free pipe on the right only
- * applies if the primary pipe has no bottom pipe currently assigned
- */
- if (primary_pipe) {
- primary_index = primary_pipe->pipe_idx;
- old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
- if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
- && (!primary_pipe->bottom_pipe))
- next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
-
- preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
- if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
- !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
- secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
- secondary_pipe->pipe_idx = preferred_pipe_idx;
- }
- }
+ cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx];
+ free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ cur_res_ctx, new_res_ctx, cur_opp_head);
- /*
- * search backwards for the second pipe to keep pipe
- * assignment more consistent
+ /* If we have not found a free secondary pipe by this point, we will
+ * need to wait at least one frame to complete the transition
+ * sequence.
*/
- if (!secondary_pipe)
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if ((res_ctx->pipe_ctx[i].stream == NULL) &&
- !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
- secondary_pipe = &res_ctx->pipe_ctx[i];
- secondary_pipe->pipe_idx = i;
- break;
- }
- }
+ if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+ cur_res_ctx, new_res_ctx, pool);
+
+ /* If we have not found a free secondary pipe by this point, we will
+ * need to wait at least two frames to complete the transition
+ * sequence. It doesn't really matter which of the currently enabled
+ * pipes we decide to take; swapping one pipe costs the same frame
+ * time as swapping several.
+ */
+ if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+ cur_res_ctx, new_res_ctx, pool);
+
+ if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool);
- return secondary_pipe;
+ return free_pipe_idx;
}
-struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream,
- struct pipe_ctx *head_pipe)
+ const struct pipe_ctx *opp_head_pipe)
{
- struct resource_context *res_ctx = &state->res_ctx;
- struct pipe_ctx *idle_pipe, *pipe;
- struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
- int head_index;
-
- if (!head_pipe)
- ASSERT(0);
-
- /*
- * Modified from dcn20_acquire_idle_pipe_for_layer
- * Check if head_pipe in old_context already has bottom_pipe allocated.
- * - If so, check if that pipe is available in the current context.
- * -- If so, reuse pipe from old_context
- */
- head_index = head_pipe->pipe_idx;
- pipe = &old_ctx->pipe_ctx[head_index];
- if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
- idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
- idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
+ int free_pipe_idx =
+ find_optimal_free_pipe_as_secondary_dpp_pipe(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx,
+ pool, opp_head_pipe);
+ struct pipe_ctx *free_pipe;
+
+ if (free_pipe_idx >= 0) {
+ free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
+ free_pipe->pipe_idx = free_pipe_idx;
+ free_pipe->stream = opp_head_pipe->stream;
+ free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg;
+ free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp;
+
+ free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx];
+ free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx];
+ free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx];
+ free_pipe->plane_res.mpcc_inst =
+ pool->dpps[free_pipe->pipe_idx]->inst;
} else {
- idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
- if (!idle_pipe)
- return NULL;
+ ASSERT(opp_head_pipe);
+ free_pipe = NULL;
}
- idle_pipe->stream = head_pipe->stream;
- idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
- idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
-
- idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
- idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
-
- return idle_pipe;
+ return free_pipe;
}
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
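A minimal, self-contained sketch of the tiered search described in the comment above. This is not the driver code: the stub helpers stand in for the resource_find_* functions called by find_optimal_free_pipe_as_secondary_dpp_pipe, and the flip-delay bookkeeping only mirrors the "at least one frame" / "at least two frames" notes from the patch.

#include <stdio.h>

#define FREE_PIPE_NOT_FOUND -1

/* Stub predicates standing in for the four search tiers. */
static int pipe_in_cur_mpc_tree(void)          { return FREE_PIPE_NOT_FOUND; }
static int pipe_not_in_cur_ctx(void)           { return 3; }
static int pipe_cur_sec_dpp_in_combine(void)   { return FREE_PIPE_NOT_FOUND; }
static int any_free_pipe(void)                 { return 5; }

/* Returns the chosen pipe index and how many frames the flip is delayed. */
static int pick_secondary_dpp_pipe(int *flip_delay_frames)
{
	int idx;

	idx = pipe_in_cur_mpc_tree();          /* best case: no power toggle */
	*flip_delay_frames = 0;
	if (idx == FREE_PIPE_NOT_FOUND) {
		idx = pipe_not_in_cur_ctx();   /* costs at least one frame   */
		*flip_delay_frames = 1;
	}
	if (idx == FREE_PIPE_NOT_FOUND) {
		idx = pipe_cur_sec_dpp_in_combine(); /* at least two frames  */
		*flip_delay_frames = 2;
	}
	if (idx == FREE_PIPE_NOT_FOUND) {
		idx = any_free_pipe();
		*flip_delay_frames = 2;
	}
	return idx;
}

int main(void)
{
	int delay, idx = pick_secondary_dpp_pipe(&delay);

	printf("picked pipe %d, flip delayed by ~%d frame(s)\n", idx, delay);
	return 0;
}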
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 3937dbc1e552..103a2b54d025 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -37,9 +37,10 @@
#define DCN3_2_MBLK_WIDTH 128
#define DCN3_2_MBLK_HEIGHT_4BPE 128
#define DCN3_2_MBLK_HEIGHT_8BPE 64
-#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq
-#define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100
+#define SUBVP_HIGH_REFRESH_LIST_LEN 3
+#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
+#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -47,6 +48,15 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+struct subvp_high_refresh_list {
+ int min_refresh;
+ int max_refresh;
+ struct resolution {
+ int width;
+ int height;
+ } res[SUBVP_HIGH_REFRESH_LIST_LEN];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -126,11 +136,11 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
bool dcn32_is_center_timing(struct pipe_ctx *pipe);
bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
-struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
- struct dc_state *state,
+struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream,
- struct pipe_ctx *head_pipe);
+ const struct pipe_ctx *opp_head_pipe);
void dcn32_determine_det_override(struct dc *dc,
struct dc_state *context,
@@ -151,10 +161,18 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
+bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe);
+
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context);
+bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height);
+
+bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context);
+
+bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
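The subvp_high_refresh_list struct added above pairs a refresh-rate window with a short whitelist of resolutions. A hypothetical instance and lookup helper are sketched below; the numeric values and the helper name are illustrative only, not the driver's actual table.

#include <stdbool.h>

#define SUBVP_HIGH_REFRESH_LIST_LEN 3

struct subvp_high_refresh_list {
	int min_refresh;
	int max_refresh;
	struct resolution {
		int width;
		int height;
	} res[SUBVP_HIGH_REFRESH_LIST_LEN];
};

/* Hypothetical table: allow 120-165Hz at these native resolutions. */
static const struct subvp_high_refresh_list example_list = {
	.min_refresh = 120,
	.max_refresh = 165,
	.res = { {3840, 2160}, {2560, 1440}, {1920, 1080} },
};

static bool refresh_and_res_allowed(int refresh_hz, int width, int height)
{
	int i;

	if (refresh_hz < example_list.min_refresh ||
	    refresh_hz > example_list.max_refresh)
		return false;

	for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) {
		if (example_list.res[i].width == width &&
		    example_list.res[i].height == height)
			return true;
	}
	return false;
}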
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index eeca16faf31a..3ad2b48954e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -94,18 +94,15 @@ uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
}
/**
- * ********************************************************************************************
- * dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
+ * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP
*
* Gets total allocation required for the phantom viewport calculated by DML in bytes and
* converts to number of cache ways.
*
- * @param [in] dc: current dc state
- * @param [in] context: new dc state
+ * @dc: current dc state
+ * @context: new dc state
*
- * @return: number of ways required for SubVP
- *
- * ********************************************************************************************
+ * Return: number of ways required for SubVP
*/
uint32_t dcn32_helper_calculate_num_ways_for_subvp(
struct dc *dc,
@@ -258,11 +255,8 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
return psr_capable;
}
-#define DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER 7
-
/**
- * *******************************************************************************************
- * dcn32_determine_det_override: Determine DET allocation for each pipe
+ * dcn32_determine_det_override(): Determine DET allocation for each pipe
*
* This function determines how much DET to allocate for each pipe. The total number of
* DET segments will be split equally among each of the streams, and after that the DET
@@ -271,6 +265,7 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
* If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
* number of DET for that given plane will be split among the pipes driving that plane.
*
+ *
* High level algorithm:
* 1. Split total DET among number of streams
* 2. For each stream, split DET among the planes
@@ -278,25 +273,11 @@ bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
* among those pipes.
* 4. Assign the DET override to the DML pipes.
*
- * Special cases:
- *
- * For two displays that have a large difference in pixel rate, we may experience
- * underflow on the larger display when we divide the DET equally. For this, we
- * will implement a modified algorithm to assign more DET to larger display.
- *
- * 1. Calculate difference in pixel rates ( multiplier ) between two displays
- * 2. If the multiplier exceeds DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER, then
- * implement the modified DET override algorithm.
- * 3. Assign smaller DET size for lower pixel display and higher DET size for
- * higher pixel display
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @pipes: Array of DML pipes
*
- * @param [in]: dc: Current DC state
- * @param [in]: context: New DC state to be programmed
- * @param [in]: pipes: Array of DML pipes
- *
- * @return: void
- *
- * *******************************************************************************************
+ * Return: void
*/
void dcn32_determine_det_override(struct dc *dc,
struct dc_state *context,
@@ -309,31 +290,10 @@ void dcn32_determine_det_override(struct dc *dc,
struct dc_plane_state *current_plane = NULL;
uint8_t stream_count = 0;
- int phy_pix_clk_mult, lower_mode_stream_index;
- int phy_pix_clk[MAX_PIPES] = {0};
- bool use_new_det_override_algorithm = false;
-
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
- phy_pix_clk[i] = context->streams[i]->phy_pix_clk;
+ if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
stream_count++;
- }
- }
-
- /* Check for special case with two displays, one with much higher pixel rate */
- if (stream_count == 2) {
- ASSERT((phy_pix_clk[0] > 0) && (phy_pix_clk[1] > 0));
- if (phy_pix_clk[0] < phy_pix_clk[1]) {
- lower_mode_stream_index = 0;
- phy_pix_clk_mult = phy_pix_clk[1] / phy_pix_clk[0];
- } else {
- lower_mode_stream_index = 1;
- phy_pix_clk_mult = phy_pix_clk[0] / phy_pix_clk[1];
- }
-
- if (phy_pix_clk_mult >= DCN3_2_NEW_DET_OVERRIDE_MIN_MULTIPLIER)
- use_new_det_override_algorithm = true;
}
if (stream_count > 0) {
@@ -342,13 +302,6 @@ void dcn32_determine_det_override(struct dc *dc,
if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
continue;
- if (use_new_det_override_algorithm) {
- if (i == lower_mode_stream_index)
- stream_segments = 4;
- else
- stream_segments = 14;
- }
-
if (context->stream_status[i].plane_count > 0)
plane_segments = stream_segments / context->stream_status[i].plane_count;
else
@@ -432,8 +385,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
}
/**
- * *******************************************************************************************
- * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
+ * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
*
* This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
* there are situations where a shallow copy of the dc->current_state is created for the
@@ -446,13 +398,11 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
* NOTE: This function ONLY works if the streams are not moved to a different pipe in the
* validation. We don't expect this to happen in fast_validation=1 cases.
*
- * @param [in]: dc: Current DC state
- * @param [in]: context: New DC state to be programmed
- * @param [out]: temp_config: struct used to cache the existing MALL state
- *
- * @return: void
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @temp_config: struct used to cache the existing MALL state
*
- * *******************************************************************************************
+ * Return: void
*/
void dcn32_save_mall_state(struct dc *dc,
struct dc_state *context,
@@ -472,18 +422,15 @@ void dcn32_save_mall_state(struct dc *dc,
}
/**
- * *******************************************************************************************
- * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
+ * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
*
* Restore the MALL state based on the previously saved state from dcn32_save_mall_state
*
- * @param [in]: dc: Current DC state
- * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
- * @param [in]: temp_config: struct that has the cached MALL state
+ * @dc: Current DC state
+ * @context: New DC state to be programmed, restore MALL state into here
+ * @temp_config: struct that has the cached MALL state
*
- * @return: void
- *
- * *******************************************************************************************
+ * Return: void
*/
void dcn32_restore_mall_state(struct dc *dc,
struct dc_state *context,
@@ -588,10 +535,11 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
}
/**
- * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch - Determines if config can support FPO
+ * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can
+ * support FPO
*
- * @param [in]: dc - current dc state
- * @param [in]: context - new dc state
+ * @dc: current dc state
+ * @context: new dc state
*
* Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
*/
@@ -626,7 +574,7 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
DC_FP_END();
DC_FP_START();
- is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US);
+ is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
DC_FP_END();
if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
return NULL;
@@ -647,12 +595,140 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us))
return NULL;
- // check if freesync enabled
if (!fpo_candidate_stream->allow_freesync)
return NULL;
- if (fpo_candidate_stream->vrr_active_variable)
+ if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming)
return NULL;
return fpo_candidate_stream;
}
+
+bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height)
+{
+ bool is_native_scaling = false;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->src_rect.width == width &&
+ pipe->plane_state->src_rect.height == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
+
+/**
+ * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ *
+ * SubVP + DRR is admissible under the following conditions:
+ * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
+ * - One display is SubVP
+ * - Other display must have Freesync enabled
+ * - The potential DRR display must not be PSR capable
+ *
+ * Return: True if admissible, false otherwise
+ */
+bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
+{
+ bool result = false;
+ uint32_t i;
+ uint8_t subvp_count = 0;
+ uint8_t non_subvp_pipes = 0;
+ bool drr_pipe_found = false;
+ bool drr_psr_capable = false;
+ uint64_t refresh_rate = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+ resource_is_pipe_type(pipe, DPP_PIPE)) {
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ subvp_count++;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+ }
+ if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ non_subvp_pipes++;
+ drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
+ if (pipe->stream->ignore_msa_timing_param &&
+ (pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) {
+ drr_pipe_found = true;
+ }
+ }
+ }
+ }
+
+ if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+ ((uint32_t)refresh_rate < 120))
+ result = true;
+
+ return result;
+}
+
+/**
+ * dcn32_subvp_vblank_admissable() - Determine if SubVP + Vblank config is admissible
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @vlevel: Voltage level calculated by DML
+ *
+ * SubVP + Vblank is admissible under the following conditions:
+ * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
+ * - One display is SubVP
+ * - Other display must not have Freesync capability
+ * - DML must have output DRAM clock change support as SubVP + Vblank
+ * - The potential vblank display must not be PSR capable
+ *
+ * Return: True if admissible, false otherwise
+ */
+bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel)
+{
+ bool result = false;
+ uint32_t i;
+ uint8_t subvp_count = 0;
+ uint8_t non_subvp_pipes = 0;
+ bool drr_pipe_found = false;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ bool vblank_psr_capable = false;
+ uint64_t refresh_rate = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+ resource_is_pipe_type(pipe, DPP_PIPE)) {
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ subvp_count++;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+ }
+ if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ non_subvp_pipes++;
+ vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
+ if (pipe->stream->ignore_msa_timing_param &&
+ (pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) {
+ drr_pipe_found = true;
+ }
+ }
+ }
+ }
+
+ if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
+ ((uint32_t)refresh_rate < 120) &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
+ result = true;
+
+ return result;
+}
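Both admissibility checks above compute the refresh rate as a ceiling division of the pixel clock by h_total * v_total, dividing by v_total and then h_total as div_u64 does in the driver. A small standalone sketch with illustrative 4k60 timing values (the numbers are an example, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

static uint64_t refresh_rate_hz(uint64_t pix_clk_100hz, uint64_t h_total,
				uint64_t v_total)
{
	/* Same rounding as the driver: ceil(pix_clk / (h_total * v_total)). */
	uint64_t rr = pix_clk_100hz * 100 + h_total * v_total - 1;

	rr /= v_total;	/* div_u64() in the kernel code */
	rr /= h_total;
	return rr;
}

int main(void)
{
	/* 3840x2160@60: 594 MHz pixel clock, 4400x2250 total timing. */
	printf("%llu Hz\n",
	       (unsigned long long)refresh_rate_hz(5940000, 4400, 2250));
	return 0;
}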
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index a60ddb343d13..8d73cceb485b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -725,31 +725,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.override_dispclk_programming = true,
.disable_fpo_optimizations = false,
.fpo_vactive_margin_us = 2000, // 2000us
- .disable_fpo_vactive = true,
+ .disable_fpo_vactive = false,
.disable_boot_optimizations = false,
+ .disable_subvp_high_refresh = false,
+ .fpo_vactive_min_active_margin_us = 200,
+ .fpo_vactive_max_blank_us = 1000,
+ .enable_legacy_fast_update = false,
+ .disable_dc_mode_overwrite = true,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_dpp_power_gate = true,
- .disable_hubp_power_gate = true,
- .disable_dsc_power_gate = true,
- .disable_clock_gate = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .disable_stutter = false,
- .scl_reset_length10 = true,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .enable_tri_buf = true,
- .use_max_lb = true,
- .force_disable_subvp = true,
-};
-
-
static struct dce_aux *dcn321_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1340,15 +1324,6 @@ static const struct resource_create_funcs res_create_funcs = {
.create_hwseq = dcn321_hwseq_create,
};
-static const struct resource_create_funcs res_create_maximus_funcs = {
- .read_dce_straps = NULL,
- .create_audio = NULL,
- .create_stream_encoder = NULL,
- .create_hpo_dp_stream_encoder = dcn321_hpo_dp_stream_encoder_create,
- .create_hpo_dp_link_encoder = dcn321_hpo_dp_link_encoder_create,
- .create_hwseq = dcn321_hwseq_create,
-};
-
static void dcn321_resource_destruct(struct dcn321_resource_pool *pool)
{
unsigned int i;
@@ -1613,7 +1588,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
+ .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1735,9 +1710,9 @@ static bool dcn321_resource_construct(
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
@@ -1745,6 +1720,7 @@ static bool dcn321_resource_construct(
dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.max_v_total = (1 << 15) - 1;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -1780,6 +1756,7 @@ static bool dcn321_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->config.dc_mode_clk_limit_support = true;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -1798,10 +1775,7 @@ static bool dcn321_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
- dc->debug = debug_defaults_diags;
- } else
- dc->debug = debug_defaults_diags;
+
// Init the vm_helper
if (dc->vm_helper)
vm_helper_init(dc->vm_helper, 16);
@@ -1857,8 +1831,7 @@ static bool dcn321_resource_construct(
}
/* DML */
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
+ dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
/* IRQ Service */
init_data.ctx = dc->ctx;
@@ -1990,9 +1963,8 @@ static bool dcn321_resource_construct(
/* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
- (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
- &res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ &res_create_funcs))
+ goto create_fail;
/* HW Sequencer init functions and Plane caps */
dcn32_hw_sequencer_init_functions(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 9a3f2a44f882..d0eed3b4771e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -40,6 +40,7 @@
struct dmub_srv;
struct dc_dmub_srv;
+union dmub_rb_cmd;
irq_handler_idx dm_register_interrupt(
struct dc_context *ctx,
@@ -274,6 +275,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
#define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX)
/*
+ * DMUB Interfaces
+ */
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+/*
* Debug and verification hooks
*/
@@ -285,4 +292,6 @@ void dm_dtn_log_append_v(struct dc_context *ctx,
void dm_dtn_log_end(struct dc_context *ctx,
struct dc_log_buffer_ctx *log_ctx);
+char *dce_version_to_string(const int version);
+
#endif /* __DM_SERVICES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index b52ba6ffabe1..facf269c4326 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -269,4 +269,10 @@ struct dtn_min_clk_info {
uint32_t min_memory_clock_khz;
};
+enum dm_dmub_wait_type {
+ DM_DMUB_WAIT_TYPE_NO_WAIT,
+ DM_DMUB_WAIT_TYPE_WAIT,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY,
+};
+
#endif
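The new dm_execute_dmub_cmd()/dm_execute_dmub_cmd_list() hooks let DC hand a DMUB ring-buffer command to the DM layer together with one of the wait types defined above. A rough usage sketch follows, assuming the usual DC/DMUB kernel headers are in scope; the helper name is hypothetical and the command payload is elided because it depends on the specific DMUB command being built.

static bool send_example_dmub_cmd(const struct dc_context *ctx)
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	/* ... fill in the command header and payload for the chosen command ... */

	/* Block until DMUB has processed the command and replied. */
	return dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}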
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 01db035589c5..77cf5545c94c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -38,6 +38,11 @@ ifdef CONFIG_ARM64
dml_rcflags := -mgeneral-regs-only
endif
+ifdef CONFIG_LOONGARCH
+dml_ccflags := -mfpu=64
+dml_rcflags := -msoft-float
+endif
+
ifdef CONFIG_CC_IS_GCC
ifneq ($(call gcc-min-version, 70100),y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
index 0100a6053ab6..f2dfa96f9ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
@@ -3015,7 +3015,7 @@ static bool all_displays_in_sync(const struct pipe_ctx pipe[],
int i, num_active_pipes = 0;
for (i = 0; i < pipe_count; i++) {
- if (!pipe[i].stream || pipe[i].top_pipe)
+ if (!resource_is_pipe_type(&pipe[i], OPP_HEAD))
continue;
active_pipes[num_active_pipes++] = &pipe[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index e73f089c84bb..50b0434354f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1258,7 +1258,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
+ hsplit_pipe = resource_find_free_secondary_pipe_legacy(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index f1c1a4b5fcac..8afda5ecc0cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
- unsigned int optimized_min_dst_y_next_start_us;
+ unsigned int min_dst_y_next_start_us;
plane_count = 0;
- optimized_min_dst_y_next_start_us = 0;
+ min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
+ bool isFreesyncVideo;
- if (dc_extended_blank_supported(dc)) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
- && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
- && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
- optimized_min_dst_y_next_start_us =
- context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
- break;
- }
+ isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
+ isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
+ min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
+ break;
}
}
@@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
+ if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -1043,7 +1042,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_hubp_count = 0;
dc_assert_fp_enabled();
@@ -1079,6 +1078,8 @@ void dcn20_calculate_dlg_params(struct dc *dc,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -1098,13 +1099,13 @@ void dcn20_calculate_dlg_params(struct dc *dc,
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
+ /* If DCN isn't making memory requests we can allow pstate change */
+ if (!active_hubp_count) {
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ }
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -1304,7 +1305,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
pipes[pipe_cnt].dout.is_virtual = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+ switch (resource_get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
@@ -1885,6 +1886,17 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
+ if ((int)(bb->sr_exit_z8_time_us * 1000)
+ != dc->bb_overrides.sr_exit_z8_time_ns
+ && dc->bb_overrides.sr_exit_z8_time_ns) {
+ bb->sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+ }
+
+ if ((int)(bb->sr_enter_plus_exit_z8_time_us * 1000)
+ != dc->bb_overrides.sr_enter_plus_exit_z8_time_ns
+ && dc->bb_overrides.sr_enter_plus_exit_z8_time_ns) {
+ bb->sr_enter_plus_exit_z8_time_us = dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+ }
if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
@@ -1915,6 +1927,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
+ int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1938,6 +1951,15 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+ }
+
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
@@ -2210,6 +2232,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
+ int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2238,6 +2261,15 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+ }
+
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
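The new sr_exit_z8_time and sr_enter_plus_exit_z8_time overrides added to dcn20_patch_bounding_box above follow the same pattern as the existing bounding-box overrides: a nanosecond value from dc->bb_overrides is applied to the microsecond SoC field only when it is set and differs from the current value. A condensed sketch of that pattern (the helper is illustrative; the driver open-codes each check):

static void apply_ns_override_us(double *bb_field_us, int override_ns)
{
	/* Skip when the override is unset (0) or already applied. */
	if (override_ns && (int)(*bb_field_us * 1000) != override_ns)
		*bb_field_us = override_ns / 1000.0;
}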
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 6266b0788387..7bf4bb7ad044 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -4356,12 +4356,16 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i][0],
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
-
- locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
- locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
- locals->EffectiveLBLatencyHidingSourceLinesChroma),
- locals->SwathHeightCPerState[i][j][k]);
+ if (locals->LinesInDETChroma) {
+ locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma +
+ dml_min(locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] *
+ locals->BytePerPixelInDETC[k] *
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesChroma),
+ locals->SwathHeightCPerState[i][j][k]);
+ } else {
+ locals->EffectiveDETLBLinesChroma = 0;
+ }
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index b7c2844d0cbe..57cf0358cc43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -810,7 +810,7 @@ static bool CalculatePrefetchSchedule(
*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
} else {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
- if (myPipe->BlockWidth256BytesC > 0)
+ if (myPipe->BlockHeight256BytesC > 0)
*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
}
@@ -3194,7 +3194,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevels;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double HostVMInefficiencyFactor;
double VRatioClamped;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index a352c703e258..ccb4ad78f667 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -674,10 +674,19 @@ void dcn30_fpu_update_bw_bounding_box(struct dc *dc,
}
/**
- * Finds dummy_latency_index when MCLK switching using firmware based
- * vblank stretch is enabled. This function will iterate through the
- * table of dummy pstate latencies until the lowest value that allows
+ * dcn30_find_dummy_latency_index_for_fw_based_mclk_switch() - Finds
+ * dummy_latency_index when MCLK switching using firmware based vblank stretch
+ * is enabled. This function will iterate through the table of dummy pstate
+ * latencies until the lowest value that allows
* dm_allow_self_refresh_and_mclk_switch to happen is found
+ *
+ * @dc: Current DC state
+ * @context: new dc state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @vlevel: Voltage level calculated by DML
+ *
+ * Return: lowest dummy_latency_index value
*/
int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
struct dc_state *context,
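The kernel-doc above describes a linear scan over a table of dummy p-state latencies. A schematic sketch of that idea is given below; the table contents and the pass/fail predicate are placeholders, not the DML implementation.

#include <stdbool.h>

#define NUM_DUMMY_LATENCIES 4

/* Hypothetical dummy p-state latencies in microseconds, lowest first. */
static const double dummy_pstate_latency_us[NUM_DUMMY_LATENCIES] = {
	10.0, 20.0, 38.0, 50.0
};

/* Placeholder for re-running DML with the candidate latency plugged in. */
static bool allows_self_refresh_and_mclk_switch(double latency_us)
{
	return latency_us >= 38.0;	/* pretend DML first passes at 38us */
}

static int find_dummy_latency_index(void)
{
	int i;

	for (i = 0; i < NUM_DUMMY_LATENCIES; i++) {
		if (allows_self_refresh_and_mclk_switch(
				dummy_pstate_latency_us[i]))
			return i;	/* lowest value that allows the switch */
	}
	return NUM_DUMMY_LATENCIES - 1;	/* nothing passed; fall back to last */
}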
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 7d0626e42ea6..ad741a723c0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -784,8 +784,7 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
Delay = Delay + 1;
// sft
Delay = Delay + 1;
- }
- else {
+ } else {
// sfr
Delay = Delay + 2;
// dsccif
@@ -3489,8 +3488,7 @@ static double TruncToValidBPP(
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
- }
- else {
+ } else {
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
}
@@ -4939,8 +4937,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
- v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k]
- + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k];
+ v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k]
+ + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5130,7 +5128,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
ViewportExceedsSurface = true;
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
- && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+ && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index cd3cfcb2a2b0..0497a5d74a62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -980,7 +980,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
unsigned int vstartup_start = 0;
unsigned int dst_x_after_scaler = 0;
- unsigned int dst_y_after_scaler = 0;
+ int dst_y_after_scaler = 0;
double line_wait = 0;
double dst_y_prefetch = 0;
double dst_y_per_vm_vblank = 0;
@@ -1171,6 +1171,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ if (dst_y_after_scaler < 0)
+ dst_y_after_scaler = 0;
// do some adjustment on the dst_after scaler to account for odm combine mode
dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 422f17aefd4a..6ce90678b33c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -333,45 +333,43 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits));
/* Default clock levels are used for diags, which may lead to overclocking. */
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
- dcn3_01_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
+ dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
+ dcn3_01_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
}
-
- s[i].state = i;
- s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- s[i].dram_bw_per_chan_gbps =
- dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- s[i].phyclk_d18_mhz =
- dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
- if (clk_table->num_entries) {
- dcn3_01_soc.num_states = clk_table->num_entries;
- /* duplicate last level */
- s[dcn3_01_soc.num_states] =
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
- }
+ s[i].state = i;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ if (clk_table->num_entries) {
+ dcn3_01_soc.num_states = clk_table->num_entries;
+ /* duplicate last level */
+ s[dcn3_01_soc.num_states] =
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
}
memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 59836570603a..deb6d162a2d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx, active_hubp_count = 0;
+ int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -563,6 +563,18 @@ void dcn31_calculate_wm_and_dlg_fp(
if (context->res_ctx.pipe_ctx[i].stream)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
}
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
+ get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
+ total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
+ pipe_idx++;
+ }
+ context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -570,6 +582,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
int j;
dc_assert_fp_enabled();
@@ -577,59 +590,55 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_1_soc.num_chans = bw_params->num_channels;
- dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_1_soc.num_chans = bw_params->num_channels;
+ ASSERT(clk_table->num_entries);
- ASSERT(clk_table->num_entries);
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
}
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
+ s[i].state = i;
- s[i].state = i;
-
- /* Clocks dependent on voltage level. */
- s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
- 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- s[i].dram_bw_per_chan_gbps =
- dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- s[i].phyclk_d18_mhz =
- dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- if (clk_table->num_entries) {
- dcn3_1_soc.num_states = clk_table->num_entries;
- }
+ /* Clocks dependent on voltage level. */
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ if (clk_table->num_entries) {
+ dcn3_1_soc.num_states = clk_table->num_entries;
}
memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits));
@@ -643,10 +652,7 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA);
+ dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
}
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -707,10 +713,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
- else
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
}
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -726,71 +729,68 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_16_soc.num_chans = bw_params->num_channels;
- dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_16_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
+ ASSERT(clk_table->num_entries);
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <=
- clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- // Ported from DCN315
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_16_soc.num_states - 1;
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
}
+ }
+ // Ported from DCN315
+ if (clk_table->num_entries == 1) {
+ /* SMU gives one DPM level, let's take the highest one */
+ closest_clk_lvl = dcn3_16_soc.num_states - 1;
+ }
- s[i].state = i;
+ s[i].state = i;
- /* Clocks dependent on voltage level. */
- s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- s[i].dcfclk_mhz <
- dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- s[i].dcfclk_mhz =
- dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
- 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- s[i].dram_bw_per_chan_gbps =
- dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- s[i].phyclk_d18_mhz =
- dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- if (clk_table->num_entries) {
- dcn3_16_soc.num_states = clk_table->num_entries;
+ /* Clocks dependent on voltage level. */
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ s[i].dcfclk_mhz <
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /* SMU fix not released yet */
+ s[i].dcfclk_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ if (clk_table->num_entries) {
+ dcn3_16_soc.num_states = clk_table->num_entries;
}
memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits));
@@ -805,13 +805,21 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
+ dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
}
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
+
+int dcn_get_approx_det_segs_required_for_pstate(
+ struct _vcs_dpi_soc_bounding_box_st *soc,
+ int pix_clk_100hz, int bpp, int seg_size_kb)
+{
+ /* Roughly calculate the required CRB segments to hide latency. In practice there is
+ * slightly more buffer available for latency hiding.
+ */
+ return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
+ / 10240000 + seg_size_kb - 1) / seg_size_kb;
+}
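The helper above rounds its latency-hiding estimate up to whole segments with the usual integer ceiling-division idiom: add seg_size_kb - 1 before dividing. A minimal stand-alone sketch of that idiom, with made-up numbers rather than values from the driver:

#include <stdio.h>

/* Round a kilobyte requirement up to whole segments of seg_size_kb. */
static int segs_required(long long required_kb, int seg_size_kb)
{
	return (int)((required_kb + seg_size_kb - 1) / seg_size_kb);
}

int main(void)
{
	/* 130 KB of data split into 64 KB segments needs 3 segments, not 2. */
	printf("%d\n", segs_required(130, 64));
	return 0;
}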
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 687d3522cc33..8f9c8faed260 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -47,6 +47,9 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
+int dcn_get_approx_det_segs_required_for_pstate(
+ struct _vcs_dpi_soc_bounding_box_st *soc,
+ int pix_clk_100hz, int bpp, int seg_size_kb);
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index bd674dc30df3..adea459e7d36 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -532,7 +532,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
- unsigned int DETBufferSizeInKByte,
+ bool DETSharedByAllDPP,
+ unsigned int DETBufferSizeInKByte[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@@ -3118,7 +3119,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
- v->DETBufferSizeInKByte[0] * 1024,
+ v->DETBufferSizeInKByte[k] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
@@ -3313,7 +3314,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
- v->DETBufferSizeInKByte[0],
+ mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
+ v->DETBufferSizeInKByte,
dummy1,
dummy2,
v->SourceScan,
@@ -3503,7 +3505,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
@@ -3779,14 +3781,16 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
-static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
{
int i, total_pipes = 0;
for (i = 0; i < NumberOfActivePlanes; i++)
total_pipes += NoOfDPPThisState[i];
- *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
- if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
- *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+ DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
+ DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
+ for (i = 1; i < NumberOfActivePlanes; i++)
+ DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
}
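As a worked example of the patched helper (with assumed numbers, not real DCN3.15 parameters): with a 1088 KB return buffer, a 64 KB minimum compressed buffer and 4 active DPPs, each pipe gets ((1088 - 64) / 64 / 4) * 64 = 256 KB of DET, and the loop added above copies that same per-pipe value to every active plane instead of only entry 0.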
@@ -4026,7 +4030,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
- v->DETBufferSizeInKByte[0],
+ mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
+ v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@@ -4130,7 +4135,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN31_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
- if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
+ if (v->Output[k] == dm_hdmi) {
+ FMTBufferExceeded = true;
+ } else if (v->HActive[k] / 2 > DCN31_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
@@ -4166,6 +4173,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
+ if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
+ v->MPCCombine[i][j][k] = true;
+ v->NoOfDPP[i][j][k] = 2;
+ }
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
@@ -4642,12 +4653,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
- if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
- PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
- v->DETBufferSizeInKByte[0],
+ mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
+ v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@@ -5274,8 +5286,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
- + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
- + v->DPTEBytesPerRow[i][j][k];
+ + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
+ + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -6611,7 +6623,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
- unsigned int DETBufferSizeInKByte,
+ bool DETSharedByAllDPP,
+ unsigned int DETBufferSizeInKByteA[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@@ -6695,6 +6708,10 @@ static void CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
+ unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
+
+ if (DETSharedByAllDPP && DPPPerPlane[k])
+ DETBufferSizeInKByte /= DPPPerPlane[k];
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear
@@ -7017,7 +7034,7 @@ static double CalculateUrgentLatency(
return ret;
}
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxPrefetchMode,
int ReorderingBytes)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 2244e4fb8c96..4113ce79c4af 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -987,8 +987,7 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
- disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
- disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+ disp_dlg_regs->min_dst_y_next_start_us = 0;
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
@@ -1433,14 +1432,6 @@ static void dml_rq_dlg_get_dlg_params(
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
- // hack for FPGA
- if (mode_lib->project == DML_PROJECT_DCN31_FPGA) {
- if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) {
- disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1;
- dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n");
- }
- }
-
disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 9e54e3d0eb78..07adb614366e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -31,6 +31,7 @@
#include "dml/dcn20/dcn20_fpu.h"
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/display_mode_vba.h"
+#include "dml/dml_inline_defs.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 668,
@@ -190,8 +191,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc_assert_fp_enabled();
// Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc->config.use_default_clock_table == false) {
-
+ if (dc->config.use_default_clock_table == false) {
dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
@@ -266,11 +266,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
}
dcn20_patch_bounding_box(dc, &dcn3_14_soc);
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
- else
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
}
static bool is_dual_plane(enum surface_pixel_format format)
@@ -278,6 +274,25 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+/*
+ * micro_sec_to_vert_lines() - convert a time to a number of vertical lines for a given timing
+ *
+ * @num_us: number of microseconds
+ * @timing: CRTC timing used to derive the line time
+ *
+ * Return: number of vertical lines. If the time does not map to an exact number of
+ * lines, the result is rounded up to the next line so that at least num_us is covered.
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
+
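A stand-alone version of the same microseconds-to-lines conversion, using an assumed 1080p-class timing (h_total = 2200, pixel clock = 148.5 MHz, i.e. pix_clk_100hz = 1485000) purely for illustration:

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_us = 668;            /* dcn3_14_ip.VBlankNomDefaultUS */
	unsigned int h_total = 2200;          /* assumed timing */
	unsigned int pix_clk_100hz = 1485000; /* assumed 148.5 MHz pixel clock */

	/* One scan line lasts h_total / pixel_clock seconds; express it in ns. */
	float line_time_ns = 1000.0f * ((float)h_total * 1000.0f) /
			     ((float)pix_clk_100hz / 10.0f);

	/* 668 us at roughly 14.8 us per line rounds up to 46 lines. */
	printf("%u\n", (unsigned int)ceilf(1000.0f * num_us / line_time_ns));
	return 0;
}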
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@@ -286,6 +301,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool upscaled = false;
+ const unsigned int max_allowed_vblank_nom = 1023;
dc_assert_fp_enabled();
@@ -293,15 +309,24 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
+ num_lines = micro_sec_to_vert_lines(dcn3_14_ip.VBlankNomDefaultUS, timing);
+
+ if (pipe->stream->adjust.v_total_min != 0)
pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+ else
+ pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
@@ -323,8 +348,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.vblank_nom =
- dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 7eb2173b7691..a94aa0f21a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -3613,7 +3613,7 @@ static void CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
double LineTime = v->HTotal[k] / v->PixelClock[k];
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
@@ -4227,7 +4227,9 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN314_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
- if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
+ if (v->Output[k] == dm_hdmi) {
+ FMTBufferExceeded = true;
+ } else if (v->HActive[k] / 2 > DCN314_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
@@ -5371,8 +5373,8 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
- + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
- + v->DPTEBytesPerRow[i][j][k];
+ + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
+ + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5557,6 +5559,65 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
}
}
}
+ for (i = v->soc.num_states; i >= 0; i--) {
+ for (j = 0; j < 2; j++) {
+ enum dm_validation_status status = DML_VALIDATION_OK;
+
+ if (!v->ScaleRatioAndTapsSupport) {
+ status = DML_FAIL_SCALE_RATIO_TAP;
+ } else if (!v->SourceFormatPixelAndScanSupport) {
+ status = DML_FAIL_SOURCE_PIXEL_FORMAT;
+ } else if (!v->ViewportSizeSupport[i][j]) {
+ status = DML_FAIL_VIEWPORT_SIZE;
+ } else if (P2IWith420) {
+ status = DML_FAIL_P2I_WITH_420;
+ } else if (DSCOnlyIfNecessaryWithBPP) {
+ status = DML_FAIL_DSC_ONLY_IF_NECESSARY_WITH_BPP;
+ } else if (DSC422NativeNotSupported) {
+ status = DML_FAIL_NOT_DSC422_NATIVE;
+ } else if (!v->ODMCombine4To1SupportCheckOK[i]) {
+ status = DML_FAIL_ODM_COMBINE4TO1;
+ } else if (v->NotEnoughDSCUnits[i]) {
+ status = DML_FAIL_NOT_ENOUGH_DSC;
+ } else if (!v->ROBSupport[i][j]) {
+ status = DML_FAIL_REORDERING_BUFFER;
+ } else if (!v->DISPCLK_DPPCLK_Support[i][j]) {
+ status = DML_FAIL_DISPCLK_DPPCLK;
+ } else if (!v->TotalAvailablePipesSupport[i][j]) {
+ status = DML_FAIL_TOTAL_AVAILABLE_PIPES;
+ } else if (!EnoughWritebackUnits) {
+ status = DML_FAIL_ENOUGH_WRITEBACK_UNITS;
+ } else if (!v->WritebackLatencySupport) {
+ status = DML_FAIL_WRITEBACK_LATENCY;
+ } else if (!v->WritebackScaleRatioAndTapsSupport) {
+ status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP;
+ } else if (!v->CursorSupport) {
+ status = DML_FAIL_CURSOR_SUPPORT;
+ } else if (!v->PitchSupport) {
+ status = DML_FAIL_PITCH_SUPPORT;
+ } else if (ViewportExceedsSurface) {
+ status = DML_FAIL_VIEWPORT_EXCEEDS_SURFACE;
+ } else if (!v->PrefetchSupported[i][j]) {
+ status = DML_FAIL_PREFETCH_SUPPORT;
+ } else if (!v->DynamicMetadataSupported[i][j]) {
+ status = DML_FAIL_DYNAMIC_METADATA;
+ } else if (!v->TotalVerticalActiveBandwidthSupport[i][j]) {
+ status = DML_FAIL_TOTAL_V_ACTIVE_BW;
+ } else if (!v->VRatioInPrefetchSupported[i][j]) {
+ status = DML_FAIL_V_RATIO_PREFETCH;
+ } else if (!v->PTEBufferSizeNotExceeded[i][j]) {
+ status = DML_FAIL_PTE_BUFFER_SIZE;
+ } else if (v->NonsupportedDSCInputBPC) {
+ status = DML_FAIL_DSC_INPUT_BPC;
+ } else if ((v->HostVMEnable
+ && !v->ImmediateFlipSupportedForState[i][j])) {
+ status = DML_FAIL_HOST_VM_IMMEDIATE_FLIP;
+ } else if (FMTBufferExceeded) {
+ status = DML_FAIL_FMT_BUFFER_EXCEEDED;
+ }
+ mode_lib->vba.ValidationStatus[i] = status;
+ }
+ }
{
unsigned int MaximumMPCCombine = 0;
@@ -7061,7 +7122,7 @@ static double CalculateUrgentLatency(
return ret;
}
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxPrefetchMode,
int ReorderingBytes)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index ea4eb66066c4..b3e8dc08030c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -951,7 +951,6 @@ static void dml_rq_dlg_get_dlg_params(
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
- const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
@@ -1000,8 +999,6 @@ static void dml_rq_dlg_get_dlg_params(
unsigned int vupdate_width;
unsigned int vready_offset;
- unsigned int dispclk_delay_subtotal;
-
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
@@ -1051,7 +1048,6 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
- int blank_lines = 0;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1075,17 +1071,10 @@ static void dml_rq_dlg_get_dlg_params(
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
- disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
- disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
- disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
- blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
- if (blank_lines < 0)
- blank_lines = 0;
- if (blank_lines != 0) {
- disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start;
- disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
- disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start;
- }
+ disp_dlg_regs->min_dst_y_next_start_us =
+ (vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+ disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
+
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
@@ -1127,13 +1116,6 @@ static void dml_rq_dlg_get_dlg_params(
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
- dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
-
- if (dout->dsc_enable) {
- double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // FROM VBA
-
- dispclk_delay_subtotal += dsc_delay;
- }
vstartup_start = dst->vstartup_start;
if (interlaced) {
@@ -1538,14 +1520,6 @@ static void dml_rq_dlg_get_dlg_params(
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
- // hack for FPGA
- if (mode_lib->project == DML_PROJECT_DCN31_FPGA) {
- if (disp_dlg_regs->vratio_prefetch >= (unsigned int) dml_pow(2, 22)) {
- disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 22) - 1;
- dml_print("vratio_prefetch exceed the max value, the register field is [21:0]\n");
- }
- }
-
disp_dlg_regs->refcyc_per_pte_group_vblank_l = (unsigned int) (dst_y_per_row_vblank * (double) htotal * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 47beb4ea779d..711d4085b33b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -35,6 +35,15 @@
#define DC_LOGGER_INIT(logger)
+static const struct subvp_high_refresh_list subvp_high_refresh_list = {
+ .min_refresh = 120,
+ .max_refresh = 175,
+ .res = {
+ {.width = 3840, .height = 2160, },
+ {.width = 3440, .height = 1440, },
+ {.width = 2560, .height = 1440, }},
+};
+
struct _vcs_dpi_ip_params_st dcn3_2_ip = {
.gpuvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -138,7 +147,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 100.0,
+ .pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
@@ -476,24 +485,20 @@ static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
}
}
-void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
+static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
struct _vcs_dpi_voltage_scaling_st *entry)
{
int i = 0;
int index = 0;
- float net_bw_of_new_state = 0;
dc_assert_fp_enabled();
- get_optimal_ntuple(entry);
-
if (*num_entries == 0) {
table[0] = *entry;
(*num_entries)++;
} else {
- net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry);
- while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) {
+ while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
index++;
if (index >= *num_entries)
break;
@@ -670,7 +675,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
unsigned int max_frame_time = 0;
bool valid_assignment_found = false;
unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
- bool current_assignment_freesync = false;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
@@ -692,8 +696,12 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
+ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
+ (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
+ !pipe->plane_state->address.tmz_surface &&
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
dcn32_allow_subvp_with_active_margin(pipe)))) {
@@ -707,19 +715,10 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_stream_state *stream = pipe->stream;
unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total /
(double)(stream->timing.pix_clk_100hz * 100)) * 1000000;
- if (frame_us > max_frame_time && !stream->ignore_msa_timing_param) {
+ if (frame_us > max_frame_time) {
*index = i;
max_frame_time = frame_us;
valid_assignment_found = true;
- current_assignment_freesync = false;
- /* For the 2-Freesync display case, still choose the one with the
- * longest frame time
- */
- } else if (stream->ignore_msa_timing_param && (!valid_assignment_found ||
- (current_assignment_freesync && frame_us > max_frame_time))) {
- *index = i;
- valid_assignment_found = true;
- current_assignment_freesync = true;
}
}
}
@@ -757,7 +756,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Find the minimum pipe split count for non SubVP pipes
- if (pipe->stream && !pipe->top_pipe &&
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
@@ -851,10 +850,9 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable() - Determine if SubVP + DRR config is schedulable
* @dc: current dc state
* @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -865,11 +863,12 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
*
* Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
-static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
+static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
{
bool schedulable = false;
uint32_t i;
struct pipe_ctx *pipe = NULL;
+ struct pipe_ctx *drr_pipe = NULL;
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *drr_timing = NULL;
@@ -880,10 +879,6 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
- const struct dc_config *config = &dc->config;
-
- if (config->disable_subvp_drr)
- return false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -891,7 +886,8 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
- if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(pipe, DPP_PIPE))
continue;
// Find the SubVP pipe
@@ -899,6 +895,20 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
break;
}
+ // Find the DRR pipe
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ drr_pipe = &context->res_ctx.pipe_ctx[i];
+
+ // We check for master pipe only
+ if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(drr_pipe, DPP_PIPE))
+ continue;
+
+ if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
+ (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable))
+ break;
+ }
+
main_timing = &pipe->stream->timing;
phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
drr_timing = &drr_pipe->stream->timing;
@@ -972,7 +982,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
- if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(pipe, DPP_PIPE))
continue;
if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
@@ -984,13 +995,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipe = pipe;
}
- // Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On
- if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param &&
- (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync ||
- context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) {
- // SUBVP + DRR case -- only allowed if run through DRR validation path
- schedulable = false;
- } else if (found) {
+ if (found) {
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
@@ -1020,6 +1025,56 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
/**
+ * subvp_subvp_admissable() - Determine if a SubVP + SubVP config is admissible
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ *
+ * SubVP + SubVP is admissible under the following conditions:
+ * - All SubVP pipes are < 120Hz OR
+ * - All SubVP pipes are >= 120hz
+ *
+ * Return: True if admissible, false otherwise
+ */
+static bool subvp_subvp_admissable(struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+ uint32_t i;
+ uint8_t subvp_count = 0;
+ uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
+ uint64_t refresh_rate = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->plane_state && !pipe->top_pipe &&
+ pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+ if ((uint32_t)refresh_rate < min_refresh)
+ min_refresh = (uint32_t)refresh_rate;
+ if ((uint32_t)refresh_rate > max_refresh)
+ max_refresh = (uint32_t)refresh_rate;
+ subvp_count++;
+ }
+ }
+
+ if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
+ (min_refresh >= subvp_high_refresh_list.min_refresh &&
+ max_refresh <= subvp_high_refresh_list.max_refresh)))
+ result = true;
+
+ return result;
+}
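The refresh-rate computation above is a 64-bit ceiling division: the pixel clock, stored in 100 Hz units, is scaled to Hz and divided by the pixels per frame, rounding up so a fractional rate still counts against the limits. A compressed sketch of the same idea, using an assumed 4400x2250 total timing at 1188 MHz (roughly a 4k120 signal):

#include <stdint.h>
#include <stdio.h>

static uint32_t refresh_rate_hz(uint64_t pix_clk_100hz, uint32_t h_total, uint32_t v_total)
{
	uint64_t pixels_per_frame = (uint64_t)h_total * v_total;

	/* Adding (pixels_per_frame - 1) rounds any partial frame up to the next Hz. */
	return (uint32_t)((pix_clk_100hz * 100 + pixels_per_frame - 1) / pixels_per_frame);
}

int main(void)
{
	printf("%u\n", refresh_rate_hz(11880000ULL, 4400, 2250)); /* prints 120 */
	return 0;
}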
+
+/**
* subvp_validate_static_schedulability - Check which SubVP case is calculated
* and handle static analysis based on the case.
* @dc: current dc state
@@ -1037,11 +1092,12 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
int vlevel)
{
- bool schedulable = true; // true by default for single display case
+ bool schedulable = false;
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
uint32_t i, pipe_idx;
uint8_t subvp_count = 0;
uint8_t vactive_count = 0;
+ uint8_t non_subvp_pipes = 0;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1049,14 +1105,18 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
- subvp_count++;
+ if (pipe->plane_state && !pipe->top_pipe) {
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ subvp_count++;
+ if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ non_subvp_pipes++;
+ }
+ }
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
- if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
+ if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
vactive_count++;
}
@@ -1065,13 +1125,14 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
if (subvp_count == 2) {
// Static schedulability check for SubVP + SubVP case
- schedulable = subvp_subvp_schedulable(dc, context);
- } else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) {
- // Static schedulability check for SubVP + VBLANK case. Also handle the case where
- // DML outputs SubVP + VBLANK + VACTIVE (DML will report as SubVP + VBLANK)
- if (vactive_count > 0)
- schedulable = false;
- else
+ schedulable = subvp_subvp_admissable(dc, context) && subvp_subvp_schedulable(dc, context);
+ } else if (subvp_count == 1 && non_subvp_pipes == 0) {
+ // Single SubVP configs will be supported by default as long as they are supported by DML
+ schedulable = true;
+ } else if (subvp_count == 1 && non_subvp_pipes == 1) {
+ if (dcn32_subvp_drr_admissable(dc, context))
+ schedulable = subvp_drr_schedulable(dc, context);
+ else if (dcn32_subvp_vblank_admissable(dc, context, vlevel))
schedulable = subvp_vblank_schedulable(dc, context);
} else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp &&
vactive_count > 0) {
@@ -1095,10 +1156,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
unsigned int dc_pipe_idx = 0;
int i = 0;
bool found_supported_config = false;
- struct pipe_ctx *pipe = NULL;
- uint32_t non_subvp_pipes = 0;
- bool drr_pipe_found = false;
- uint32_t drr_pipe_index = 0;
dc_assert_fp_enabled();
@@ -1129,7 +1186,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
- if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
+ if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
!dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
(*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
@@ -1188,31 +1245,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
- if (*vlevel < context->bw_ctx.dml.soc.num_states &&
- vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported
- && subvp_validate_static_schedulability(dc, context, *vlevel)) {
+ if (*vlevel < context->bw_ctx.dml.soc.num_states
+ && subvp_validate_static_schedulability(dc, context, *vlevel))
found_supported_config = true;
- } else if (*vlevel < context->bw_ctx.dml.soc.num_states) {
- /* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed.
- * This handles the case for SubVP + DRR, where the DRR display does not support MCLK
- * switch at it's native refresh rate / timing, or DRR is allowed for the non-subvp
- * display.
- */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
- non_subvp_pipes++;
- // Use ignore_msa_timing_param flag to identify as DRR
- if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {
- drr_pipe_found = true;
- drr_pipe_index = i;
- }
- }
- }
- // If there is only 1 remaining non SubVP pipe that is DRR, check static
- // schedulability for SubVP + DRR.
- if (non_subvp_pipes == 1 && drr_pipe_found) {
+ if (found_supported_config) {
+ // For SubVP + DRR cases, we can force the lowest vlevel that supports the mode
+ if (dcn32_subvp_drr_admissable(dc, context) && subvp_drr_schedulable(dc, context)) {
/* find lowest vlevel that supports the config */
for (i = *vlevel; i >= 0; i--) {
if (vba->ModeSupport[i][vba->maxMpcComb]) {
@@ -1221,9 +1259,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
break;
}
}
-
- found_supported_config = subvp_drr_schedulable(dc, context,
- &context->res_ctx.pipe_ctx[drr_pipe_index]);
}
}
}
@@ -1315,6 +1350,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
int i, pipe_idx, active_hubp_count = 0;
bool usr_retraining_support = false;
bool unbounded_req_enabled = false;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
dc_assert_fp_enabled();
@@ -1396,6 +1432,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0)
+ context->res_ctx.pipe_ctx[i].has_vactive_margin = true;
+ else
+ context->res_ctx.pipe_ctx[i].has_vactive_margin = false;
+
/* MALL Allocation Sizes */
/* count from active, top pipes per plane only */
if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state &&
@@ -1432,6 +1473,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
}
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
@@ -1679,8 +1721,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
&& !dc->config.enable_windowed_mpo_odm
&& pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
+ && memcmp(&mpo_pipe->plane_state->clip_rect,
+ &pipe->stream->src,
sizeof(struct rect)) != 0) {
ASSERT(mpo_pipe->plane_state != pipe->plane_state);
goto validate_fail;
@@ -2005,6 +2047,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = true;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank;
} else {
/* Restore FCLK latency and re-run validation to go back to original validation
* output if we find that enabling FPO does not give us any benefit (i.e. lower
@@ -2062,6 +2105,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
* sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
*/
+ /*
if (dcn3_2_soc.num_states > 2) {
vlevel_temp = 0;
dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
@@ -2088,6 +2132,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
/* Set C, for Dummy P-State:
* All clocks min.
@@ -2189,6 +2234,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
+ /* Make set D = set A since we do not optimized watermarks for MALL */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -2303,14 +2351,105 @@ void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
}
-static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
+ struct _vcs_dpi_voltage_scaling_st *second_entry)
+{
+ struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
+ *first_entry = *second_entry;
+ *second_entry = temp_entry;
+}
+
+/*
+ * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
+ */
+static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ unsigned int start_index = 0;
+ unsigned int end_index = 0;
+ unsigned int current_bw = 0;
+
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ current_bw = table[i].net_bw_in_kbytes_sec;
+ start_index = i;
+ end_index = ++i;
+
+ while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
+ end_index = ++i;
+ }
+
+ if (start_index != end_index) {
+ for (int j = start_index; j < end_index; j++) {
+ for (int k = start_index; k < end_index; k++) {
+ if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
+ swap_table_entries(&table[k], &table[k+1]);
+ }
+ }
+ }
+
+ start_index = 0;
+ end_index = 0;
+
+ }
+}
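For instance, if three consecutive entries all report the same net_bw_in_kbytes_sec but list DCFCLK as 902, 676 and 806 MHz, the pass above reorders just that run to 676, 806, 902 MHz; entries on either side with a different bandwidth keep their positions.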
+
+/*
+ * remove_inconsistent_entries - Ensure that entries sharing the same bandwidth have
+ * monotonically increasing MEMCLK and FCLK, and remove entries that do not
+ */
+static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
+ (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
+ remove_entry_from_table_at_index(table, num_entries, i);
+ }
+ }
+}
+
+/*
+ * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode limits
+ * Input:
+ *	max_clk_limit - struct containing the desired max clock frequencies
+ * Output:
+ *	curr_clk_limit - struct containing the frequencies that need to be overwritten
+ * Return: 0 upon success, non-zero for failure
+ */
+static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit,
+ struct clk_limit_table_entry *curr_clk_limit)
+{
+ if (NULL == max_clk_limit || NULL == curr_clk_limit)
+ return -1; //invalid parameters
+
+ //only overwrite if desired max clock frequency is initialized
+ if (max_clk_limit->dcfclk_mhz != 0)
+ curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz;
+
+ if (max_clk_limit->fclk_mhz != 0)
+ curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz;
+
+ if (max_clk_limit->memclk_mhz != 0)
+ curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz;
+
+ if (max_clk_limit->socclk_mhz != 0)
+ curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz;
+
+ if (max_clk_limit->dtbclk_mhz != 0)
+ curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz;
+
+ if (max_clk_limit->dispclk_mhz != 0)
+ curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz;
+
+ return 0;
+}
+
+static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
int i, j;
struct _vcs_dpi_voltage_scaling_st entry = {0};
-
- unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
- max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+ struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
@@ -2321,51 +2460,76 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
unsigned int num_fclk_dpms = 0;
unsigned int num_dcfclk_dpms = 0;
- for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
- max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
- if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
- max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
- if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
- max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ unsigned int num_dc_uclk_dpms = 0;
+ unsigned int num_dc_fclk_dpms = 0;
+ unsigned int num_dc_dcfclk_dpms = 0;
- if (bw_params->clk_table.entries[i].memclk_mhz > 0)
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)
+ max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)
+ max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)
+ max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)
+ max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)
+ max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz)
+ max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz)
+ max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+
+ if (bw_params->clk_table.entries[i].memclk_mhz > 0) {
num_uclk_dpms++;
- if (bw_params->clk_table.entries[i].fclk_mhz > 0)
+ if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz)
+ num_dc_uclk_dpms++;
+ }
+ if (bw_params->clk_table.entries[i].fclk_mhz > 0) {
num_fclk_dpms++;
- if (bw_params->clk_table.entries[i].dcfclk_mhz > 0)
+ if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz)
+ num_dc_fclk_dpms++;
+ }
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) {
num_dcfclk_dpms++;
+ if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz)
+ num_dc_dcfclk_dpms++;
+ }
+ }
+
+ if (!disable_dc_mode_overwrite) {
+ //Overwrite max frequencies with max DC mode frequencies for DC mode systems
+ override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
+ num_uclk_dpms = num_dc_uclk_dpms;
+ num_fclk_dpms = num_dc_fclk_dpms;
+ num_dcfclk_dpms = num_dc_dcfclk_dpms;
+ bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms;
+ bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms;
}
if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
- if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
+ if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz)
return -1;
- if (max_dppclk_mhz == 0)
- max_dppclk_mhz = max_dispclk_mhz;
+ if (max_clk_data.dppclk_mhz == 0)
+ max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz;
- if (max_fclk_mhz == 0)
- max_fclk_mhz = max_dcfclk_mhz * dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / dcn3_2_soc.pct_ideal_fabric_bw_after_urgent;
+ if (max_clk_data.fclk_mhz == 0)
+ max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
+ dcn3_2_soc.pct_ideal_sdp_bw_after_urgent /
+ dcn3_2_soc.pct_ideal_fabric_bw_after_urgent;
- if (max_phyclk_mhz == 0)
- max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
+ if (max_clk_data.phyclk_mhz == 0)
+ max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
*num_entries = 0;
- entry.dispclk_mhz = max_dispclk_mhz;
- entry.dscclk_mhz = max_dispclk_mhz / 3;
- entry.dppclk_mhz = max_dppclk_mhz;
- entry.dtbclk_mhz = max_dtbclk_mhz;
- entry.phyclk_mhz = max_phyclk_mhz;
+ entry.dispclk_mhz = max_clk_data.dispclk_mhz;
+ entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3;
+ entry.dppclk_mhz = max_clk_data.dppclk_mhz;
+ entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
+ entry.phyclk_mhz = max_clk_data.phyclk_mhz;
entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
@@ -2375,14 +2539,18 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
// Insert the max DCFCLK
- entry.dcfclk_mhz = max_dcfclk_mhz;
+ entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
// Insert the UCLK DPMS
@@ -2391,6 +2559,8 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -2401,15 +2571,19 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
}
// If FCLK fine grained, only insert max
else {
entry.dcfclk_mhz = 0;
- entry.fabricclk_mhz = max_fclk_mhz;
+ entry.fabricclk_mhz = max_clk_data.fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -2419,12 +2593,27 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
// Remove states that require higher clocks than are supported
for (i = *num_entries - 1; i >= 0 ; i--) {
- if (table[i].dcfclk_mhz > max_dcfclk_mhz ||
- table[i].fabricclk_mhz > max_fclk_mhz ||
- table[i].dram_speed_mts > max_uclk_mhz * 16)
+ if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
+ table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
+ table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
remove_entry_from_table_at_index(table, num_entries, i);
}
+ // Insert entry with all max dc limits without bandwidth matching
+ if (!disable_dc_mode_overwrite) {
+ struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
+
+ max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
+ max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
+ max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
+
+ max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
+ insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
+
+ sort_entries_with_same_bw(table, num_entries);
+ remove_inconsistent_entries(table, num_entries);
+ }
+
// At this point, the table only contains supported points of interest
// it could be used as is, but some states may be redundant due to
// coarse grained nature of some clocks, so we want to round up to
@@ -2508,80 +2697,78 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
{
dc_assert_fp_enabled();
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* Overrides from dc->config options */
- dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
-
- /* Override from passed dc->bb_overrides if available*/
- if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
- && dc->bb_overrides.sr_exit_time_ns) {
- dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
- }
+ /* Overrides from dc->config options */
+ dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
- if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
- != dc->bb_overrides.sr_enter_plus_exit_time_ns
- && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dcn3_2_soc.sr_enter_plus_exit_time_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
+ /* Override from passed dc->bb_overrides if available*/
+ if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
+ && dc->bb_overrides.sr_exit_time_ns) {
+ dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
- if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
- && dc->bb_overrides.urgent_latency_ns) {
- dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
- dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
- }
+ if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
+ != dc->bb_overrides.sr_enter_plus_exit_time_ns
+ && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ dcn3_2_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
- if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
- != dc->bb_overrides.dram_clock_change_latency_ns
- && dc->bb_overrides.dram_clock_change_latency_ns) {
- dcn3_2_soc.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
+ if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
+ && dc->bb_overrides.urgent_latency_ns) {
+ dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
- if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
- != dc->bb_overrides.fclk_clock_change_latency_ns
- && dc->bb_overrides.fclk_clock_change_latency_ns) {
- dcn3_2_soc.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
- }
+ if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
+ != dc->bb_overrides.dram_clock_change_latency_ns
+ && dc->bb_overrides.dram_clock_change_latency_ns) {
+ dcn3_2_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
- if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
- != dc->bb_overrides.dummy_clock_change_latency_ns
- && dc->bb_overrides.dummy_clock_change_latency_ns) {
- dcn3_2_soc.dummy_pstate_latency_us =
- dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
- }
+ if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_2_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
+ if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
+ != dc->bb_overrides.dummy_clock_change_latency_ns
+ && dc->bb_overrides.dummy_clock_change_latency_ns) {
+ dcn3_2_soc.dummy_pstate_latency_us =
+ dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+ }
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_2_soc.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
+ /* Override from VBIOS if VBIOS bb_info available */
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ struct bp_soc_bb_info bb_info = {0};
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_2_soc.sr_enter_plus_exit_time_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_2_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_2_soc.sr_exit_time_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
- dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
- dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
+ }
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ /* Override from VBIOS for num_chan */
+ if (dc->ctx->dc_bios->vram_info.num_chans) {
+ dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+ dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
/* DML DSC delay factor workaround */
dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
@@ -2592,7 +2779,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
 /* Overrides Clock levels from CLK Mgr table entries as reported by PM FW */
- if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) {
+ if (bw_params->clk_table.entries[0].memclk_mhz) {
if (dc->debug.use_legacy_soc_bb_mechanism) {
unsigned int i = 0, j = 0, num_states = 0;
@@ -2736,7 +2923,8 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
}
} else {
- build_synthetic_soc_states(bw_params, dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
+ build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
+ dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
}
/* Re-init DML with updated bb */
@@ -2783,15 +2971,76 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
}
/**
- * *******************************************************************************************
- * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
+ * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp
*
- * @param [in]: dc: Current DC state
- * @param [in]: context: New DC state to be programmed
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @pipe: Pipe to be considered for use in subvp
+ *
+ * On high refresh rate display configs, we will allow subvp under the following conditions:
+ * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440
+ * 2. Refresh rate is between 120 Hz and 165 Hz
+ * 3. No scaling
+ * 4. Freesync is not active (neither variable nor fixed VRR is engaged)
+ * 5. For single display cases, freesync must be disabled
+ *
+ * Return: True if pipe can be used for subvp, false otherwise
+ */
+bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe)
+{
+ bool allow = false;
+ uint32_t refresh_rate = 0;
+ uint32_t subvp_min_refresh = subvp_high_refresh_list.min_refresh;
+ uint32_t subvp_max_refresh = subvp_high_refresh_list.max_refresh;
+ uint32_t min_refresh = subvp_max_refresh;
+ uint32_t i;
+
+ /* Only allow SubVP on high refresh displays if all connected displays
+ * are considered "high refresh" (i.e. >= 120hz). We do not want to
+ * allow combinations such as 120hz (SubVP) + 60hz (SubVP).
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+ refresh_rate = (pipe_ctx->stream->timing.pix_clk_100hz * 100 +
+ pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total - 1)
+ / (double)(pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total);
+
+ if (refresh_rate < min_refresh)
+ min_refresh = refresh_rate;
+ }
+
+ if (!dc->debug.disable_subvp_high_refresh && min_refresh >= subvp_min_refresh && pipe->stream &&
+ pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
+ / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ if (refresh_rate >= subvp_min_refresh && refresh_rate <= subvp_max_refresh) {
+ for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) {
+ uint32_t width = subvp_high_refresh_list.res[i].width;
+ uint32_t height = subvp_high_refresh_list.res[i].height;
+
+ if (dcn32_check_native_scaling_for_res(pipe, width, height)) {
+ if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) {
+ allow = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ return allow;
+}
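
The refresh rate math in the function above is a round-up integer computation: adding (v_total * h_total - 1) before the divide pushes a mode that is fractionally below a whole rate up to that rate. A worked example with hypothetical timing numbers (4400 x 2250 total, roughly a 4k120 timing):

	/* pix_clk_100hz = 11,880,000  ->  1,188,000,000 Hz pixel clock
	 * v_total * h_total = 2250 * 4400 = 9,900,000
	 * (1,188,000,000 + 9,900,000 - 1) / 9,900,000 = ~120.99  -> stored as 120
	 *
	 * A slightly short 119.98 Hz mode (pixel clock 1,187,802,000 Hz) gives
	 * (1,187,802,000 + 9,899,999) / 9,900,000 = ~120.98  -> also stored as 120,
	 * so panels just under 120 Hz still land inside the 120-165 Hz window.
	 */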
+
+/**
+ * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
*
- * @return: Max vratio for prefetch
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
*
- * *******************************************************************************************
+ * Return: Max vratio for prefetch
*/
double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
{
@@ -2821,9 +3070,9 @@ double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *conte
* ActiveMargin <= 0 to be the FPO stream candidate if found.
*
*
- * @param [in]: dc - current dc state
- * @param [in]: context - new dc state
- * @param [out]: fpo_candidate_stream - pointer to FPO stream candidate if one is found
+ * @dc: current dc state
+ * @context: new dc state
+ * @fpo_candidate_stream: pointer to FPO stream candidate if one is found
*
* Return: void
*/
@@ -2849,10 +3098,9 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
/**
* dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE
*
- * @param [in]: dc - current dc state
- * @param [in]: context - new dc state
- * @param [in]: vactive_margin_req_us - The vactive marign required for a vactive pipe to be
- * considered "found"
+ * @dc: current dc state
+ * @context: new dc state
+ * @vactive_margin_req_us: The vactive margin required for a vactive pipe to be considered "found"
*
* Return: True if VACTIVE display is found, false otherwise
*/
@@ -2861,6 +3109,7 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint
unsigned int i, pipe_idx;
const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool vactive_found = false;
+ unsigned int blank_us = 0;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -2868,7 +3117,10 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint
if (!pipe->stream)
continue;
- if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us) {
+ blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total /
+ (double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000;
+ if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us &&
+ !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < dc->debug.fpo_vactive_max_blank_us) {
vactive_found = true;
break;
}
@@ -2882,3 +3134,18 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
dc_assert_fp_enabled();
dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
}
+
+void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
+{
+ // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
+ if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
+ dc->dml.soc.num_chans <= 8) {
+ int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
+
+ if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 &&
+ num_mclk_levels > 1) {
+ context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ }
+ }
+}
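
The override above works in the same effective-MT/s unit the rest of DML uses: DRAMSpeed is memclk_mhz * 16, and dramclk_khz converts back by dividing the same factor out again. A worked example with hypothetical clock-table entries:

	/* Suppose entries[0].memclk_mhz = 625 (the strobe level) and
	 * entries[1].memclk_mhz = 1125 (first non-strobe level).
	 *
	 * A DRAMSpeed at or below 625 * 16 = 10000 MT/s is raised to
	 * 1125 * 16 = 18000 MT/s, and the reported clock becomes
	 * dramclk_khz = 18000 * 1000 / 16 = 1,125,000 kHz (1125 MHz).
	 */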
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index dcf512cd3072..defbee866be6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -39,10 +39,6 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e);
-void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
- unsigned int *num_entries,
- struct _vcs_dpi_voltage_scaling_st *entry);
-
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *ref_pipe,
@@ -80,6 +76,8 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req);
+void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context);
+
void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index d75248b6cae9..cbdfb762c10c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -811,7 +811,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightC[k],
TWait,
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
- v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
+ v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
@@ -2323,10 +2323,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.LinkCapacitySupport[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
- && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
- || mode_lib->vba.Output[k] == dm_edp
- || mode_lib->vba.Output[k] == dm_hdmi)
- && mode_lib->vba.OutputBppPerState[i][k] == 0) {
+ && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
+ || mode_lib->vba.Output[k] == dm_edp
+ || mode_lib->vba.Output[k] == dm_hdmi)
+ && mode_lib->vba.OutputBppPerState[i][k] == 0 &&
+ (mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)) {
+ /* Phantom pipes don't consider DSC in DML, so it could fail link check.
+ * However, we don't care about the link for phantom pipes.
+ */
mode_lib->vba.LinkCapacitySupport[i] = false;
}
}
@@ -3311,7 +3315,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
- (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
+ (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index d98e36a9a09c..c4745d63039b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -53,7 +53,7 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600
-#define MIN_DCFCLK_FREQ_MHZ 200
+#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300
#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib;
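
The renamed constant is a threshold for requesting extra prefetch time rather than a minimum DCFCLK: both call sites updated above pass min_prefetch_in_strobe_us when either the DRAM speed is at or below the strobe frequency or the per-state DCFCLK is at or below 300 MHz. A condensed sketch of the condition with the defines substituted:

	/* extra prefetch time is requested when
	 *   DRAMSpeedPerState[state] <= 1600   (MEM_STROBE_FREQ_MHZ)
	 *   || DCFCLKPerState[state] <= 300    (DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ)
	 * otherwise 0 is passed and no extra prefetch time is reserved.
	 */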
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 61cc4904ade4..ecea008f19d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1595,7 +1595,6 @@ double dml32_TruncToValidBPP(
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
- unsigned int NonDSCBPP3;
if (Format == dm_420) {
NonDSCBPP0 = 12;
@@ -1604,10 +1603,9 @@ double dml32_TruncToValidBPP(
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
} else if (Format == dm_444) {
- NonDSCBPP0 = 18;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 30;
- NonDSCBPP3 = 36;
+ NonDSCBPP0 = 24;
+ NonDSCBPP1 = 30;
+ NonDSCBPP2 = 36;
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
@@ -1661,9 +1659,7 @@ double dml32_TruncToValidBPP(
else
return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
} else {
- if (MaxLinkBPP >= NonDSCBPP3)
- return NonDSCBPP3;
- else if (MaxLinkBPP >= NonDSCBPP2)
+ if (MaxLinkBPP >= NonDSCBPP2)
return NonDSCBPP2;
else if (MaxLinkBPP >= NonDSCBPP1)
return NonDSCBPP1;
@@ -1674,7 +1670,7 @@ double dml32_TruncToValidBPP(
}
} else {
if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 ||
- DesiredBPP == NonDSCBPP0 || DesiredBPP == NonDSCBPP3)) ||
+ DesiredBPP <= NonDSCBPP0)) ||
(DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP)))
return BPP_INVALID;
else
@@ -3463,6 +3459,7 @@ bool dml32_CalculatePrefetchSchedule(
double TimeForFetchingMetaPTE = 0;
double TimeForFetchingRowInVBlank = 0;
double LinesToRequestPrefetchPixelData = 0;
+ double LinesForPrefetchBandwidth = 0;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
double Tvm_trips;
@@ -3892,11 +3889,15 @@ bool dml32_CalculatePrefetchSchedule(
TimeForFetchingMetaPTE = Tvm_oto;
TimeForFetchingRowInVBlank = Tr0_oto;
*PrefetchBandwidth = prefetch_bw_oto;
+ /* Clamp to oto for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
*DestinationLinesForPrefetch = dst_y_prefetch_equ;
TimeForFetchingMetaPTE = Tvm_equ;
TimeForFetchingRowInVBlank = Tr0_equ;
*PrefetchBandwidth = prefetch_bw_equ;
+ /* Clamp to equ for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_equ;
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
@@ -3904,7 +3905,7 @@ bool dml32_CalculatePrefetchSchedule(
*DestinationLinesToRequestRowInVBlank =
dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
- LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch -
+ LinesToRequestPrefetchPixelData = LinesForPrefetchBandwidth -
*DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#ifdef __DML_VBA_DEBUG__
@@ -4128,7 +4129,7 @@ void dml32_CalculateFlipSchedule(
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
- double ImmediateFlipBW;
+ double ImmediateFlipBW = 1.0;
if (GPUVMEnable == true && HostVMEnable == true)
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
@@ -4342,7 +4343,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
+ v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->USRRetrainingRequiredFinal)
- v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
if (TotalActiveWriteback <= 1) {
@@ -4660,6 +4661,10 @@ void dml32_CalculateMinAndMaxPrefetchMode(
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
+ } else if (AllowForPStateChangeOrStutterInVBlankFinal ==
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible) {
+ *MinPrefetchMode = 0;
+ *MaxPrefetchMode = 3;
} else {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 395ae8761980..9ba6cb67655f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -116,7 +116,7 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
else
rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1;
- if (min_meta_chunk_bytes == 0)
+ if (p1_min_meta_chunk_bytes == 0)
rq_regs->rq_regs_c.min_meta_chunk_size = 0;
else
rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1;
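
The one-line fix above looks like a copy-paste correction: the chroma register was previously gated on the luma chunk size, so a surface with luma meta but p1_min_meta_chunk_bytes == 0 would have fed 0 into dml_log2() for the chroma field. With the corrected guard the two registers are computed symmetrically:

	/* luma:   min_meta_chunk_bytes    == 0 ? 0 : dml_log2(min_meta_chunk_bytes)    - 6 + 1
	 * chroma: p1_min_meta_chunk_bytes == 0 ? 0 : dml_log2(p1_min_meta_chunk_bytes) - 6 + 1
	 */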
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 342a1bcb4927..b26fcf86014c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -207,24 +207,20 @@ static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *
return limiting_bw_kbytes_sec;
}
-void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
+static void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
struct _vcs_dpi_voltage_scaling_st *entry)
{
int i = 0;
int index = 0;
- float net_bw_of_new_state = 0;
dc_assert_fp_enabled();
- get_optimal_ntuple(entry);
-
if (*num_entries == 0) {
table[0] = *entry;
(*num_entries)++;
} else {
- net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry);
- while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) {
+ while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
index++;
if (index >= *num_entries)
break;
@@ -252,14 +248,105 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
-static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
+static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
+ struct _vcs_dpi_voltage_scaling_st *second_entry)
+{
+ struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
+ *first_entry = *second_entry;
+ *second_entry = temp_entry;
+}
+
+/*
+ * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
+ */
+static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ unsigned int start_index = 0;
+ unsigned int end_index = 0;
+ unsigned int current_bw = 0;
+
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ current_bw = table[i].net_bw_in_kbytes_sec;
+ start_index = i;
+ end_index = ++i;
+
+ while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
+ end_index = ++i;
+ }
+
+ if (start_index != end_index) {
+ for (int j = start_index; j < end_index; j++) {
+ for (int k = start_index; k < end_index; k++) {
+ if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
+ swap_table_entries(&table[k], &table[k+1]);
+ }
+ }
+ }
+
+ start_index = 0;
+ end_index = 0;
+
+ }
+}
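
For a concrete picture of the sort above, consider hypothetical entries whose net bandwidth ties: each run of equal-bandwidth states is bubble-sorted by ascending DCFCLK while entries outside the run keep their positions.

	/* before (net_bw, dcfclk_mhz): (A, 450) (B, 900) (B, 600) (B, 750) (C, 1200)
	 * after:                       (A, 450) (B, 600) (B, 750) (B, 900) (C, 1200)
	 */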
+
+/*
+ * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
+ * and remove entries that do not follow this order
+ */
+static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
+{
+ for (int i = 0; i < (*num_entries - 1); i++) {
+ if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
+ if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
+ (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
+ remove_entry_from_table_at_index(table, num_entries, i);
+ }
+ }
+}
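
The pruning above then keeps each equal-bandwidth run monotonic: an entry is removed when its DRAM speed or FCLK exceeds that of the next entry in the same run. A hypothetical example:

	/* (net_bw, dram_speed_mts, fabricclk_mhz):
	 *   (B, 18000, 1200) followed by (B, 16000, 1200)  -> the first entry is removed
	 *   (B, 16000, 1000) followed by (B, 16000, 1200)  -> both entries are kept
	 */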
+
+/*
+ * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode clock limits
+ * Input:
+ *	max_clk_limit - struct containing the desired max DC mode clock limits
+ * Output:
+ *	curr_clk_limit - struct containing the clock limits that need to be overwritten
+ * Return: 0 upon success, non-zero for failure
+ */
+static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit,
+ struct clk_limit_table_entry *curr_clk_limit)
+{
+ if (NULL == max_clk_limit || NULL == curr_clk_limit)
+ return -1; //invalid parameters
+
+ //only overwrite if desired max clock frequency is initialized
+ if (max_clk_limit->dcfclk_mhz != 0)
+ curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz;
+
+ if (max_clk_limit->fclk_mhz != 0)
+ curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz;
+
+ if (max_clk_limit->memclk_mhz != 0)
+ curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz;
+
+ if (max_clk_limit->socclk_mhz != 0)
+ curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz;
+
+ if (max_clk_limit->dtbclk_mhz != 0)
+ curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz;
+
+ if (max_clk_limit->dispclk_mhz != 0)
+ curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz;
+
+ return 0;
+}
+
+static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
int i, j;
struct _vcs_dpi_voltage_scaling_st entry = {0};
-
- unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
- max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+ struct clk_limit_table_entry max_clk_data = {0};
unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
@@ -270,51 +357,76 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
unsigned int num_fclk_dpms = 0;
unsigned int num_dcfclk_dpms = 0;
+ unsigned int num_dc_uclk_dpms = 0;
+ unsigned int num_dc_fclk_dpms = 0;
+ unsigned int num_dc_dcfclk_dpms = 0;
+
for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
- max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
- if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
- max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
- if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
- max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
-
- if (bw_params->clk_table.entries[i].memclk_mhz > 0)
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)
+ max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)
+ max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)
+ max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)
+ max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)
+ max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz)
+ max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz)
+ max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+
+ if (bw_params->clk_table.entries[i].memclk_mhz > 0) {
num_uclk_dpms++;
- if (bw_params->clk_table.entries[i].fclk_mhz > 0)
+ if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz)
+ num_dc_uclk_dpms++;
+ }
+ if (bw_params->clk_table.entries[i].fclk_mhz > 0) {
num_fclk_dpms++;
- if (bw_params->clk_table.entries[i].dcfclk_mhz > 0)
+ if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz)
+ num_dc_fclk_dpms++;
+ }
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) {
num_dcfclk_dpms++;
+ if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz)
+ num_dc_dcfclk_dpms++;
+ }
+ }
+
+ if (!disable_dc_mode_overwrite) {
+ //Overwrite max frequencies with max DC mode frequencies for DC mode systems
+ override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
+ num_uclk_dpms = num_dc_uclk_dpms;
+ num_fclk_dpms = num_dc_fclk_dpms;
+ num_dcfclk_dpms = num_dc_dcfclk_dpms;
+ bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms;
+ bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms;
}
if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
- if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz)
+ if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz)
return -1;
- if (max_dppclk_mhz == 0)
- max_dppclk_mhz = max_dispclk_mhz;
+ if (max_clk_data.dppclk_mhz == 0)
+ max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz;
- if (max_fclk_mhz == 0)
- max_fclk_mhz = max_dcfclk_mhz * dcn3_21_soc.pct_ideal_sdp_bw_after_urgent / dcn3_21_soc.pct_ideal_fabric_bw_after_urgent;
+ if (max_clk_data.fclk_mhz == 0)
+ max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
+ dcn3_21_soc.pct_ideal_sdp_bw_after_urgent /
+ dcn3_21_soc.pct_ideal_fabric_bw_after_urgent;
- if (max_phyclk_mhz == 0)
- max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
+ if (max_clk_data.phyclk_mhz == 0)
+ max_clk_data.phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
*num_entries = 0;
- entry.dispclk_mhz = max_dispclk_mhz;
- entry.dscclk_mhz = max_dispclk_mhz / 3;
- entry.dppclk_mhz = max_dppclk_mhz;
- entry.dtbclk_mhz = max_dtbclk_mhz;
- entry.phyclk_mhz = max_phyclk_mhz;
+ entry.dispclk_mhz = max_clk_data.dispclk_mhz;
+ entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3;
+ entry.dppclk_mhz = max_clk_data.dppclk_mhz;
+ entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
+ entry.phyclk_mhz = max_clk_data.phyclk_mhz;
entry.phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz;
entry.phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz;
@@ -324,14 +436,18 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
// Insert the max DCFCLK
- entry.dcfclk_mhz = max_dcfclk_mhz;
+ entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
// Insert the UCLK DPMS
@@ -340,6 +456,8 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -350,15 +468,19 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
}
// If FCLK fine grained, only insert max
else {
entry.dcfclk_mhz = 0;
- entry.fabricclk_mhz = max_fclk_mhz;
+ entry.fabricclk_mhz = max_clk_data.fclk_mhz;
entry.dram_speed_mts = 0;
+ get_optimal_ntuple(&entry);
+ entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
dcn321_insert_entry_into_table_sorted(table, num_entries, &entry);
}
@@ -368,12 +490,29 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
// Remove states that require higher clocks than are supported
for (i = *num_entries - 1; i >= 0 ; i--) {
- if (table[i].dcfclk_mhz > max_dcfclk_mhz ||
- table[i].fabricclk_mhz > max_fclk_mhz ||
- table[i].dram_speed_mts > max_uclk_mhz * 16)
+ if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
+ table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
+ table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
remove_entry_from_table_at_index(table, num_entries, i);
}
+ // Insert entry with all max dc limits without bandwidth matching
+ if (!disable_dc_mode_overwrite) {
+ struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
+
+ max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
+ max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
+ max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
+
+ max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
+ dcn321_insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
+
+ sort_entries_with_same_bw(table, num_entries);
+ remove_inconsistent_entries(table, num_entries);
+ }
+
// At this point, the table only contains supported points of interest
// it could be used as is, but some states may be redundant due to
// coarse grained nature of some clocks, so we want to round up to
@@ -471,80 +610,78 @@ static void dcn321_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
{
dc_assert_fp_enabled();
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- /* Overrides from dc->config options */
- dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
-
- /* Override from passed dc->bb_overrides if available*/
- if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
- && dc->bb_overrides.sr_exit_time_ns) {
- dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
- }
+ /* Overrides from dc->config options */
+ dcn3_21_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
- if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000)
- != dc->bb_overrides.sr_enter_plus_exit_time_ns
- && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dcn3_21_soc.sr_enter_plus_exit_time_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
+ /* Override from passed dc->bb_overrides if available*/
+ if ((int)(dcn3_21_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
+ && dc->bb_overrides.sr_exit_time_ns) {
+ dcn3_21_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
- if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
- && dc->bb_overrides.urgent_latency_ns) {
- dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
- dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
- }
+ if ((int)(dcn3_21_soc.sr_enter_plus_exit_time_us * 1000)
+ != dc->bb_overrides.sr_enter_plus_exit_time_ns
+ && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ dcn3_21_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
- if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
- != dc->bb_overrides.dram_clock_change_latency_ns
- && dc->bb_overrides.dram_clock_change_latency_ns) {
- dcn3_21_soc.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
+ if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
+ && dc->bb_overrides.urgent_latency_ns) {
+ dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
- if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
- != dc->bb_overrides.fclk_clock_change_latency_ns
- && dc->bb_overrides.fclk_clock_change_latency_ns) {
- dcn3_21_soc.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
- }
+ if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
+ != dc->bb_overrides.dram_clock_change_latency_ns
+ && dc->bb_overrides.dram_clock_change_latency_ns) {
+ dcn3_21_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
- if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
- != dc->bb_overrides.dummy_clock_change_latency_ns
- && dc->bb_overrides.dummy_clock_change_latency_ns) {
- dcn3_21_soc.dummy_pstate_latency_us =
- dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
- }
+ if ((int)(dcn3_21_soc.fclk_change_latency_us * 1000)
+ != dc->bb_overrides.fclk_clock_change_latency_ns
+ && dc->bb_overrides.fclk_clock_change_latency_ns) {
+ dcn3_21_soc.fclk_change_latency_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
+ }
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
+ if ((int)(dcn3_21_soc.dummy_pstate_latency_us * 1000)
+ != dc->bb_overrides.dummy_clock_change_latency_ns
+ && dc->bb_overrides.dummy_clock_change_latency_ns) {
+ dcn3_21_soc.dummy_pstate_latency_us =
+ dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+ }
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_21_soc.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
+ /* Override from VBIOS if VBIOS bb_info available */
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ struct bp_soc_bb_info bb_info = {0};
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_21_soc.sr_enter_plus_exit_time_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_21_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_21_soc.sr_exit_time_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
- dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
- dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
+ }
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ /* Override from VBIOS for num_chan */
+ if (dc->ctx->dc_bios->vram_info.num_chans) {
+ dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+ dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
}
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
/* DML DSC delay factor workaround */
dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
@@ -555,150 +692,149 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
 /* Overrides Clock levels from CLK Mgr table entries as reported by PM FW */
- if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) {
- if (dc->debug.use_legacy_soc_bb_mechanism) {
- unsigned int i = 0, j = 0, num_states = 0;
-
- unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
- unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
- unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
- unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
-
- unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564};
- unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
- unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
-
- for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
- if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
- max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
- if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
- if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
- if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
- max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
- }
- if (!max_dcfclk_mhz)
- max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz;
- if (!max_dispclk_mhz)
- max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz;
- if (!max_dppclk_mhz)
- max_dppclk_mhz = dcn3_21_soc.clock_limits[0].dppclk_mhz;
- if (!max_phyclk_mhz)
- max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
-
- if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
- // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
- dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
- num_dcfclk_sta_targets++;
- } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
- // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
- for (i = 0; i < num_dcfclk_sta_targets; i++) {
- if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
- dcfclk_sta_targets[i] = max_dcfclk_mhz;
- break;
- }
+ if (dc->debug.use_legacy_soc_bb_mechanism) {
+ unsigned int i = 0, j = 0, num_states = 0;
+
+ unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
+ unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
+
+ unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {615, 906, 1324, 1564};
+ unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ }
+ if (!max_dcfclk_mhz)
+ max_dcfclk_mhz = dcn3_21_soc.clock_limits[0].dcfclk_mhz;
+ if (!max_dispclk_mhz)
+ max_dispclk_mhz = dcn3_21_soc.clock_limits[0].dispclk_mhz;
+ if (!max_dppclk_mhz)
+ max_dppclk_mhz = dcn3_21_soc.clock_limits[0].dppclk_mhz;
+ if (!max_phyclk_mhz)
+ max_phyclk_mhz = dcn3_21_soc.clock_limits[0].phyclk_mhz;
+
+ if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+ num_dcfclk_sta_targets++;
+ } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
+ dcfclk_sta_targets[i] = max_dcfclk_mhz;
+ break;
}
- // Update size of array since we "removed" duplicates
- num_dcfclk_sta_targets = i + 1;
}
+ // Update size of array since we "removed" duplicates
+ num_dcfclk_sta_targets = i + 1;
+ }
- num_uclk_states = bw_params->clk_table.num_entries;
+ num_uclk_states = bw_params->clk_table.num_entries;
- // Calculate optimal dcfclk for each uclk
- for (i = 0; i < num_uclk_states; i++) {
- dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
- &optimal_dcfclk_for_uclk[i], NULL);
- if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
- optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
- }
+ // Calculate optimal dcfclk for each uclk
+ for (i = 0; i < num_uclk_states; i++) {
+ dcn321_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ &optimal_dcfclk_for_uclk[i], NULL);
+ if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
}
+ }
- // Calculate optimal uclk for each dcfclk sta target
- for (i = 0; i < num_dcfclk_sta_targets; i++) {
- for (j = 0; j < num_uclk_states; j++) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
- optimal_uclk_for_dcfclk_sta_targets[i] =
- bw_params->clk_table.entries[j].memclk_mhz * 16;
- break;
- }
+ // Calculate optimal uclk for each dcfclk sta target
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ for (j = 0; j < num_uclk_states; j++) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ break;
}
}
+ }
- i = 0;
- j = 0;
- // create the final dcfclk and uclk table
- while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
- dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
- dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ i = 0;
+ j = 0;
+ // create the final dcfclk and uclk table
+ while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ } else {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
} else {
- if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
- dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
- dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
- } else {
- j = num_uclk_states;
- }
+ j = num_uclk_states;
}
}
+ }
- while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
- dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
- dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
- }
+ while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ }
- while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
- optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
- dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
- dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
- }
+ while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
+ optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
- dcn3_21_soc.num_states = num_states;
- for (i = 0; i < dcn3_21_soc.num_states; i++) {
- dcn3_21_soc.clock_limits[i].state = i;
- dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
- dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
-
- /* Fill all states with max values of all these clocks */
- dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
- dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
- dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
- dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3;
-
- /* Populate from bw_params for DTBCLK, SOCCLK */
- if (i > 0) {
- if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
- dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz;
- } else {
- dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
- }
- } else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
+ dcn3_21_soc.num_states = num_states;
+ for (i = 0; i < dcn3_21_soc.num_states; i++) {
+ dcn3_21_soc.clock_limits[i].state = i;
+ dcn3_21_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_21_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+
+ /* Fill all states with max values of all these clocks */
+ dcn3_21_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+ dcn3_21_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
+ dcn3_21_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
+ dcn3_21_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3;
+
+ /* Populate from bw_params for DTBCLK, SOCCLK */
+ if (i > 0) {
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
+ dcn3_21_soc.clock_limits[i].dtbclk_mhz = dcn3_21_soc.clock_limits[i-1].dtbclk_mhz;
+ } else {
dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
}
+ } else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
+ dcn3_21_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
- if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
- dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz;
- else
- dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+ dcn3_21_soc.clock_limits[i].socclk_mhz = dcn3_21_soc.clock_limits[i-1].socclk_mhz;
+ else
+ dcn3_21_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
- if (!dram_speed_mts[i] && i > 0)
- dcn3_21_soc.clock_limits[i].dram_speed_mts = dcn3_21_soc.clock_limits[i-1].dram_speed_mts;
- else
- dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+ if (!dram_speed_mts[i] && i > 0)
+ dcn3_21_soc.clock_limits[i].dram_speed_mts = dcn3_21_soc.clock_limits[i-1].dram_speed_mts;
+ else
+ dcn3_21_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
- /* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */
- /* PHYCLK_D18, PHYCLK_D32 */
- dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz;
- dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz;
- }
- } else {
- build_synthetic_soc_states(bw_params, dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states);
+ /* These clocks cannot come from bw_params, always fill from dcn3_21_soc[0] */
+ /* PHYCLK_D18, PHYCLK_D32 */
+ dcn3_21_soc.clock_limits[i].phyclk_d18_mhz = dcn3_21_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_21_soc.clock_limits[i].phyclk_d32_mhz = dcn3_21_soc.clock_limits[0].phyclk_d32_mhz;
}
-
- /* Re-init DML with updated bb */
- dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
- if (dc->current_state)
- dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
+ } else {
+ build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
+ dcn3_21_soc.clock_limits, &dcn3_21_soc.num_states);
}
+
+ /* Re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32);
}
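The update path above interleaves the DCFCLK STA targets with the UCLK-derived states to build dcn3_21_soc.clock_limits (capped at DC__VOLTAGE_STATES), then pads each state: dispclk/dppclk/phyclk/dscclk get the maximum values, while dtbclk, socclk and dram_speed_mts fall back to the previous state whenever the bw_params entry is zero, before DML is re-initialized. Below is a small standalone sketch of just the merge step; the clock values are made up, and the condition used when an STA target wins is an assumption, since that branch is trimmed out of the hunk above.

#include <stdio.h>

#define MAX_STATES 8

int main(void)
{
	/* Hypothetical DCFCLK STA targets and the optimal UCLK paired with each. */
	unsigned int dcfclk_sta[] = { 615, 906, 1324 };       /* MHz */
	unsigned int uclk_for_sta[] = { 1000, 1600, 2200 };   /* MT/s */
	/* Hypothetical memclk-derived states and the optimal DCFCLK for each. */
	unsigned int uclk_states[] = { 1200, 1800, 2400 };    /* MT/s */
	unsigned int dcfclk_for_uclk[] = { 750, 1150, 1550 }; /* MHz */
	unsigned int max_dcfclk = 1434, num_sta = 3, num_uclk = 3;

	unsigned int dcfclk_mhz[MAX_STATES], dram_mts[MAX_STATES];
	unsigned int num_states = 0, i = 0, j = 0;

	/* Interleave the two lists; the "STA target wins" condition is a stand-in. */
	while (i < num_sta && j < num_uclk && num_states < MAX_STATES) {
		if (dcfclk_sta[i] < dcfclk_for_uclk[j]) {
			dcfclk_mhz[num_states] = dcfclk_sta[i];
			dram_mts[num_states++] = uclk_for_sta[i++];
		} else if (dcfclk_for_uclk[j] <= max_dcfclk) {
			dcfclk_mhz[num_states] = dcfclk_for_uclk[j];
			dram_mts[num_states++] = uclk_states[j++];
		} else {
			j = num_uclk; /* remaining UCLK states need more DCFCLK than available */
		}
	}

	/* Drain whatever is left of either list, as the two while loops above do. */
	while (i < num_sta && num_states < MAX_STATES) {
		dcfclk_mhz[num_states] = dcfclk_sta[i];
		dram_mts[num_states++] = uclk_for_sta[i++];
	}
	while (j < num_uclk && num_states < MAX_STATES &&
	       dcfclk_for_uclk[j] <= max_dcfclk) {
		dcfclk_mhz[num_states] = dcfclk_for_uclk[j];
		dram_mts[num_states++] = uclk_states[j++];
	}

	for (i = 0; i < num_states; i++)
		printf("state %u: dcfclk %u MHz, dram %u MT/s\n",
		       i, dcfclk_mhz[i], dram_mts[i]);
	return 0;
}

With these illustrative inputs the sketch emits five states with ascending DCFCLK (615, 750, 906, 1150, 1324 MHz), their DRAM speeds coming alternately from the STA pairing and from the memclk entries, which mirrors how the driver fills the voltage table before calling dml_init_instance().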
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h
index e8fad9b4be69..c6623b3705ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h
@@ -29,10 +29,6 @@
#include "dml/display_mode_vba.h"
-void dcn321_insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
- unsigned int *num_entries,
- struct _vcs_dpi_voltage_scaling_st *entry);
-
void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 0bffae95f3a2..d5831a34f5a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -190,6 +190,14 @@ enum dm_validation_status {
DML_FAIL_DSC_INPUT_BPC,
DML_FAIL_PREFETCH_SUPPORT,
DML_FAIL_V_RATIO_PREFETCH,
+ DML_FAIL_P2I_WITH_420,
+ DML_FAIL_DSC_ONLY_IF_NECESSARY_WITH_BPP,
+ DML_FAIL_NOT_DSC422_NATIVE,
+ DML_FAIL_ODM_COMBINE4TO1,
+ DML_FAIL_ENOUGH_WRITEBACK_UNITS,
+ DML_FAIL_VIEWPORT_EXCEEDS_SURFACE,
+ DML_FAIL_DYNAMIC_METADATA,
+ DML_FAIL_FMT_BUFFER_EXCEEDED,
};
enum writeback_config {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index bdf3ac6cadd5..da0cfbb071e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -113,7 +113,6 @@ void dml_init_instance(struct display_mode_lib *lib,
lib->funcs = dml30_funcs;
break;
case DML_PROJECT_DCN31:
- case DML_PROJECT_DCN31_FPGA:
case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index a9d49ef58fb5..5edf69fa40d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,7 +41,6 @@ enum dml_project {
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
DML_PROJECT_DCN315,
- DML_PROJECT_DCN31_FPGA,
DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 3c077164f362..fb17f8868cb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -167,6 +167,7 @@ struct _vcs_dpi_voltage_scaling_st {
double phyclk_mhz;
double dppclk_mhz;
double dtbclk_mhz;
+ float net_bw_in_kbytes_sec;
};
/**
@@ -619,8 +620,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
- unsigned int optimized_min_dst_y_next_start;
- unsigned int optimized_min_dst_y_next_start_us;
+ unsigned int min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index f9653f511baa..9a3ded311195 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -571,6 +571,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
+ if (src->det_size_override)
+ mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
+ else
+ mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
//TODO: Need to assign correct values to dp_multistream vars
mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
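The lines added above choose the per-plane DET allocation for DML: a non-zero det_size_override on the plane wins, otherwise the IP default det_buffer_size_kbytes is used, and the second hunk below lets pipes that share a plane inherit the same override. A trivial sketch of the selection, with made-up sizes:

#include <stdio.h>

/* Per-plane DET selection as in the hunk above: an explicit override wins,
 * otherwise fall back to the IP default. Sizes below are illustrative. */
static unsigned int det_buffer_kbytes(unsigned int det_size_override,
				      unsigned int ip_default_kbytes)
{
	return det_size_override ? det_size_override : ip_default_kbytes;
}

int main(void)
{
	printf("%u KB\n", det_buffer_kbytes(0, 1280));   /* no override -> IP default */
	printf("%u KB\n", det_buffer_kbytes(384, 1280)); /* plane pins its own DET size */
	return 0;
}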
@@ -785,6 +789,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[k] =
mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
+ if (src_k->det_size_override)
+ mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
@@ -927,18 +933,16 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
}
/**
- * ********************************************************************************************
* cache_debug_params: Cache any params that needed to be maintained from the initial validation
* for debug purposes.
*
* The DML getters can modify some of the VBA params that we are interested in (for example when
* calculating with dummy p-state latency), so cache any params here that we want for debugging
*
- * @param [in] mode_lib: mode_lib input/output of validate call
+ * @mode_lib: mode_lib input/output of validate call
*
- * @return: void
+ * Return: void
*
- * ********************************************************************************************
*/
static void cache_debug_params(struct display_mode_lib *mode_lib)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 2bdc47615543..3966845c7694 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -40,6 +40,8 @@ static bool dsc_policy_enable_dsc_when_not_needed;
static bool dsc_policy_disable_dsc_stream_overhead;
+static bool disable_128b_132b_stream_overhead;
+
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
@@ -47,8 +49,44 @@ static bool dsc_policy_disable_dsc_stream_overhead;
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
+/* Need to account for padding due to pixel-to-symbol packing
+ * for uncompressed 128b/132b streams.
+ */
+static uint32_t apply_128b_132b_stream_overhead(
+ const struct dc_crtc_timing *timing, const uint32_t kbps)
+{
+ uint32_t total_kbps = kbps;
+
+ if (disable_128b_132b_stream_overhead)
+ return kbps;
+
+ if (!timing->flags.DSC) {
+ struct fixed31_32 bpp;
+ struct fixed31_32 overhead_factor;
+
+ bpp = dc_fixpt_from_int(kbps);
+ bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
+
+ /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
+ * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
+ */
+ overhead_factor = dc_fixpt_from_int(timing->h_addressable);
+ overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
+ overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
+ overhead_factor = dc_fixpt_div(
+ dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
+ overhead_factor);
+
+ total_kbps = dc_fixpt_ceil(
+ dc_fixpt_mul_int(overhead_factor, total_kbps));
+ }
+
+ return total_kbps;
+}
+
uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
@@ -96,6 +134,9 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
kbps = kbps * 2 / 3;
}
+ if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ kbps = apply_128b_132b_stream_overhead(timing, kbps);
+
return kbps;
}
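apply_128b_132b_stream_overhead() models the pixel-to-symbol padding of an uncompressed 128b/132b stream: the active line is converted to 32-bit symbols across four lanes (hence the divide by 128), rounded up to a whole symbol, and the bandwidth is scaled by ceil(x)/x; streams with DSC enabled skip the adjustment. A floating-point sketch of the same arithmetic follows (the driver uses the dc_fixpt fixed-point helpers; the timing numbers are made up):

#include <math.h>
#include <stdio.h>

/* symbols_per_hactive = h_addressable * bpp / (4 lanes * 32-bit symbols)
 * overhead_factor     = ceil(symbols_per_hactive) / symbols_per_hactive
 */
static unsigned int add_128b_132b_overhead(unsigned int kbps,
					   unsigned int pix_clk_khz,
					   unsigned int h_addressable)
{
	double bpp = (double)kbps / pix_clk_khz;      /* bits per pixel */
	double symbols = h_addressable * bpp / 128.0;
	double factor = ceil(symbols) / symbols;

	return (unsigned int)ceil(kbps * factor);
}

int main(void)
{
	/* Made-up 1366-pixel-wide, 24 bpp stream with a ~85.5 MHz pixel clock. */
	unsigned int pix_clk_khz = 85500;
	unsigned int kbps = pix_clk_khz * 24;

	printf("native %u kbps -> padded %u kbps\n",
	       kbps, add_128b_132b_overhead(kbps, pix_clk_khz, 1366));
	return 0;
}

For this hypothetical timing the per-line symbol count is 1366 * 24 / 128 = 256.125, so the factor is 257 / 256.125, roughly a 0.34% bandwidth bump.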
@@ -107,6 +148,7 @@ static bool decide_dsc_bandwidth_range(
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range);
static uint32_t compute_bpp_x16_from_target_bandwidth(
@@ -133,6 +175,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -398,6 +441,7 @@ bool dc_dsc_compute_bandwidth_range(
uint32_t max_bpp_x16,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
@@ -417,11 +461,11 @@ bool dc_dsc_compute_bandwidth_range(
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, &config);
+ &options, link_encoding, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
- config.num_slices_h, &dsc_common_caps, timing, range);
+ config.num_slices_h, &dsc_common_caps, timing, link_encoding, range);
return is_dsc_possible;
}
@@ -557,6 +601,7 @@ static bool decide_dsc_bandwidth_range(
const uint32_t num_slices_h,
const struct dsc_enc_caps *dsc_caps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_bw_range *range)
{
uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
@@ -586,7 +631,7 @@ static bool decide_dsc_bandwidth_range(
/* populate output structure */
if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing, link_encoding);
/* max dsc target bpp */
range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
@@ -612,6 +657,7 @@ static bool decide_dsc_target_bpp_x16(
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const int num_slices_h,
+ const enum dc_link_encoding_format link_encoding,
int *target_bpp_x16)
{
struct dc_dsc_bw_range range;
@@ -619,7 +665,7 @@ static bool decide_dsc_target_bpp_x16(
*target_bpp_x16 = 0;
if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
- num_slices_h, dsc_common_caps, timing, &range)) {
+ num_slices_h, dsc_common_caps, timing, link_encoding, &range)) {
if (target_bandwidth_kbps >= range.stream_kbps) {
if (policy->enable_dsc_when_not_needed)
/* enable max bpp even dsc is not needed */
@@ -645,8 +691,6 @@ static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *av
{
int idx = 0;
- memset(available_slices, -1, MIN_AVAILABLE_SLICES_SIZE);
-
if (slice_caps.bits.NUM_SLICES_1)
available_slices[idx++] = 1;
@@ -700,7 +744,7 @@ static int inc_num_slices(union dsc_enc_slice_caps slice_caps, int num_slices)
}
}
- if (new_num_slices == num_slices) // No biger number of slices found
+ if (new_num_slices == num_slices) // No bigger number of slices found
new_num_slices++;
return new_num_slices;
@@ -798,6 +842,7 @@ static bool setup_dsc_config(
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -952,6 +997,13 @@ static bool setup_dsc_config(
else
is_dsc_possible = false;
}
+ // When we force 2:1 ODM, we can't have 1 slice to divide amongst 2 separate DSC instances
+ // need to enforce at minimum 2 horizontal slices
+ if (options->dsc_force_odm_hslice_override) {
+ num_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, 2);
+ if (num_slices_h == 0)
+ is_dsc_possible = false;
+ }
if (!is_dsc_possible)
goto done;
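The new check above handles forced 2:1 ODM: each DSC engine then compresses one ODM segment, so a single horizontal slice cannot be shared between the two instances, and the code bumps num_slices_h to the smallest supported count of at least 2, failing validation when the encoder cannot provide it. A minimal model of that fit-up step; the capability table and helper below are illustrative, not the driver's fit_num_slices_up():

#include <stdio.h>

/* Hypothetical encoder slice-count capabilities, smallest to largest. */
static const int supported_slices[] = { 1, 2, 4, 8 };

/* Smallest supported slice count >= want, or 0 if none (DSC is then rejected). */
static int fit_slices_up(int want)
{
	for (int i = 0; i < 4; i++)
		if (supported_slices[i] >= want)
			return supported_slices[i];
	return 0;
}

int main(void)
{
	/* Forced 2:1 ODM asks for at least 2 slices; an encoder that could only
	 * do a single slice would get 0 back here and fail validation. */
	printf("forced 2:1 ODM -> %d horizontal slices\n", fit_slices_up(2));
	return 0;
}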
@@ -990,6 +1042,7 @@ static bool setup_dsc_config(
target_bandwidth_kbps,
timing,
num_slices_h,
+ link_encoding,
&target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
@@ -1018,6 +1071,7 @@ bool dc_dsc_compute_config(
const struct dc_dsc_config_options *options,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding,
struct dc_dsc_config *dsc_cfg)
{
bool is_dsc_possible = false;
@@ -1027,7 +1081,7 @@ bool dc_dsc_compute_config(
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, dsc_cfg);
+ timing, options, link_encoding, dsc_cfg);
return is_dsc_possible;
}
@@ -1160,9 +1214,15 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
dsc_policy_disable_dsc_stream_overhead = disable;
}
+void dc_set_disable_128b_132b_stream_overhead(bool disable)
+{
+ disable_128b_132b_stream_overhead = disable;
+}
+
void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
{
options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
+ options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine;
options->max_target_bpp_limit_override_x16 = 0;
options->slice_height_granularity = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 59884ef651b3..4a2bf81286d8 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -31,21 +31,21 @@
/****************************** new register headers */
/*** following in header */
-#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \
+#define DDC_GPIO_REG_LIST_ENTRY(type, cd, id) \
.type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\
.type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT
-#define DDC_GPIO_REG_LIST(cd,id) \
+#define DDC_GPIO_REG_LIST(cd, id) \
{\
- DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
- DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\
- DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\
- DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ DDC_GPIO_REG_LIST_ENTRY(MASK, cd, id),\
+ DDC_GPIO_REG_LIST_ENTRY(A, cd, id),\
+ DDC_GPIO_REG_LIST_ENTRY(EN, cd, id),\
+ DDC_GPIO_REG_LIST_ENTRY(Y, cd, id)\
}
-#define DDC_REG_LIST(cd,id) \
- DDC_GPIO_REG_LIST(cd,id),\
+#define DDC_REG_LIST(cd, id) \
+ DDC_GPIO_REG_LIST(cd, id),\
.ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
#define DDC_REG_LIST_DCN2(cd, id) \
@@ -54,34 +54,34 @@
.phy_aux_cntl = REG(PHY_AUX_CNTL), \
.dc_gpio_aux_ctrl_5 = REG(DC_GPIO_AUX_CTRL_5)
-#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
+#define DDC_GPIO_VGA_REG_LIST_ENTRY(type, cd)\
.type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
.type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT
#define DDC_GPIO_VGA_REG_LIST(cd) \
{\
- DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\
- DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\
- DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\
- DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(MASK, cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(A, cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(EN, cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(Y, cd)\
}
#define DDC_VGA_REG_LIST(cd) \
DDC_GPIO_VGA_REG_LIST(cd),\
.ddc_setup = mmDC_I2C_DDCVGA_SETUP
-#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \
+#define DDC_GPIO_I2C_REG_LIST_ENTRY(type, cd) \
.type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\
.type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT
#define DDC_GPIO_I2C_REG_LIST(cd) \
{\
- DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\
- DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\
- DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\
- DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(MASK, cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(A, cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(EN, cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(Y, cd)\
}
#define DDC_I2C_REG_LIST(cd) \
@@ -150,12 +150,12 @@ struct ddc_sh_mask {
#define ddc_data_regs(id) \
{\
- DDC_REG_LIST(DATA,id)\
+ DDC_REG_LIST(DATA, id)\
}
#define ddc_clk_regs(id) \
{\
- DDC_REG_LIST(CLK,id)\
+ DDC_REG_LIST(CLK, id)\
}
#define ddc_vga_data_regs \
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
index dcfdd71b2304..debb363cfcf4 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
@@ -36,17 +36,17 @@
#define ONE_MORE_5 6
-#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \
+#define HPD_GPIO_REG_LIST_ENTRY(type, cd, id) \
.type ## _reg = REG(DC_GPIO_HPD_## type),\
.type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
.type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
#define HPD_GPIO_REG_LIST(id) \
{\
- HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
- HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\
- HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\
- HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ HPD_GPIO_REG_LIST_ENTRY(MASK, cd, id),\
+ HPD_GPIO_REG_LIST_ENTRY(A, cd, id),\
+ HPD_GPIO_REG_LIST_ENTRY(EN, cd, id),\
+ HPD_GPIO_REG_LIST_ENTRY(Y, cd, id)\
}
#define HPD_REG_LIST(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 2eb597a24425..027aec70c070 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -37,6 +37,7 @@
#include "dwb.h"
#include "mcif_wb.h"
#include "panel_cntl.h"
+#include "dmub/inc/dmub_cmd.h"
#define MAX_CLOCK_SOURCES 7
#define MAX_SVP_PHANTOM_STREAMS 2
@@ -124,39 +125,15 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context);
- /*
- * Acquires a free pipe for the head pipe.
- * The head pipe is first pipe in the current context that matches the stream
- * and does not have a top pipe or prev_odm_pipe.
- */
- struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
- struct dc_state *context,
- const struct resource_pool *pool,
- struct dc_stream_state *stream);
-
- /*
- * Acquires a free pipe for the head pipe with some additional checks for odm.
- * The head pipe is passed in as an argument unlike acquire_idle_pipe_for_layer
- * where it is read from the context. So this allows us look for different
- * idle_pipe if the head_pipes are different ( ex. in odm 2:1 when we have
- * a left and right pipe ).
- *
- * It also checks the old context to see if:
- *
- * 1. a pipe has already been allocated for the head pipe. If so, it will
- * try to select that pipe as the idle pipe if it is available in the current
- * context.
- * 2. if the head_pipe is on the left, it will check if the right pipe has
- * a pipe already allocated. If so, it will not use that pipe if it is
- * selected as the idle pipe.
- */
- struct pipe_ctx *(*acquire_idle_pipe_for_head_pipe_in_layer)(
- struct dc_state *context,
+ struct pipe_ctx *(*acquire_free_pipe_as_secondary_dpp_pipe)(
+ const struct dc_state *cur_ctx,
+ struct dc_state *new_ctx,
const struct resource_pool *pool,
- struct dc_stream_state *stream,
- struct pipe_ctx *head_pipe);
+ const struct pipe_ctx *opp_head_pipe);
- enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+ enum dc_status (*validate_plane)(
+ const struct dc_plane_state *plane_state,
+ struct dc_caps *caps);
enum dc_status (*add_stream_to_ctx)(
struct dc *dc,
@@ -303,6 +280,8 @@ struct resource_pool {
struct dmcu *dmcu;
struct dmub_psr *psr;
+ struct dmub_replay *replay;
+
struct abm *multiple_abms[MAX_PIPES];
const struct resource_funcs *funcs;
@@ -374,6 +353,7 @@ union pipe_update_flags {
uint32_t viewport : 1;
uint32_t plane_changed : 1;
uint32_t det_size : 1;
+ uint32_t unbounded_req : 1;
} bits;
uint32_t raw;
};
@@ -426,6 +406,8 @@ struct pipe_ctx {
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
union pipe_update_flags update_flags;
+ struct tg_color visual_confirm_color;
+ bool has_vactive_margin;
};
/* Data used for dynamic link encoder assignment.
@@ -496,6 +478,11 @@ struct bw_context {
struct display_mode_lib dml;
};
+struct dc_dmub_cmd {
+ union dmub_rb_cmd dmub_cmd;
+ enum dm_dmub_wait_type wait_type;
+};
+
/**
* struct dc_state - The full description of a state requested by users
*/
@@ -544,6 +531,11 @@ struct dc_state {
*/
struct bw_context bw_ctx;
+ struct block_sequence block_sequence[50];
+ unsigned int block_sequence_steps;
+ struct dc_dmub_cmd dc_dmub_cmd[10];
+ unsigned int dmub_cmd_count;
+
/**
* @refcount: refcount reference
*
@@ -558,6 +550,23 @@ struct dc_state {
} perf_params;
};
+struct replay_context {
+ /* ddc line */
+ enum channel_id aux_inst;
+ /* Transmitter id */
+ enum transmitter digbe_inst;
+ /* Engine Id is used for Dig Be source select */
+ enum engine_id digfe_inst;
+ /* Controller Id used for Dig Fe source select */
+ enum controller_id controllerId;
+ unsigned int line_time_in_ns;
+};
+
+enum dc_replay_enable {
+ DC_REPLAY_DISABLE = 0,
+ DC_REPLAY_ENABLE = 1,
+};
+
struct dc_bounding_box_max_clk {
int max_dcfclk_mhz;
int max_dispclk_mhz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index ecb4191b6e64..33db15d69f23 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,6 +27,8 @@
#include "dm_services_types.h"
+struct abm_save_restore;
+
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
@@ -55,6 +57,14 @@ struct abm_funcs {
unsigned int bytes,
unsigned int inst);
bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
+ bool (*save_restore)(
+ struct abm *abm,
+ unsigned int panel_inst,
+ struct abm_save_restore *pData);
+ bool (*set_pipe_ex)(struct abm *abm,
+ unsigned int otg_inst,
+ unsigned int option,
+ unsigned int panel_inst);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index 7254182b7c72..af6b9509d09d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -172,8 +172,6 @@ struct aux_engine_funcs {
struct aux_engine *engine,
uint8_t *returned_bytes);
bool (*is_engine_available)(struct aux_engine *engine);
- enum i2caux_engine_type (*get_engine_type)(
- const struct aux_engine *engine);
bool (*acquire)(
struct aux_engine *engine,
struct ddc *ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index bef843cc32a1..ecb7bcc39469 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -230,9 +230,11 @@ struct clk_bw_params {
unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
+ unsigned int max_memclk_mhz;
struct clk_limit_table clk_table;
struct wm_table wm_table;
struct dummy_pstate_entry dummy_pstate_table[4];
+ struct clk_limit_table_entry dc_mode_limit;
};
/* Public interfaces */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index ad6acd1b34e1..3e2f0f64c98c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -68,6 +68,7 @@ struct dccg {
const struct dccg_funcs *funcs;
int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
+ bool dpp_clock_gated[MAX_PIPES];
//int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
//int audio_dtbclk_khz;/* TODO needs to be removed */
//int ref_dtbclk_khz;/* TODO needs to be removed */
@@ -122,6 +123,11 @@ struct dccg_funcs {
struct dccg *dccg,
int hpo_le_inst);
+ void (*set_symclk32_le_root_clock_gating)(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ bool enable);
+
void (*set_physymclk)(
struct dccg *dccg,
int phy_inst,
@@ -159,10 +165,23 @@ struct dccg_funcs {
int otg_inst,
int pixclk_khz);
+ void (*trigger_dio_fifo_resync)(
+ struct dccg *dccg);
+
void (*dpp_root_clock_control)(
struct dccg *dccg,
unsigned int dpp_inst,
bool clock_on);
+
+ void (*enable_symclk_se)(
+ struct dccg *dccg,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
+
+ void (*disable_symclk_se)(
+ struct dccg *dccg,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index beb26dc8a07f..f5677dbb4e7d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -49,6 +49,8 @@ struct dcn_hubbub_wm_set {
uint32_t dram_clk_change;
uint32_t usr_retrain;
uint32_t fclk_pstate_change;
+ uint32_t sr_enter_exit_Z8;
+ uint32_t sr_enter_Z8;
};
struct dcn_hubbub_wm {
@@ -111,6 +113,9 @@ struct dcn_hubbub_state {
uint32_t vm_error_vmid;
uint32_t vm_error_pipe;
uint32_t vm_error_mode;
+ uint32_t test_debug_data;
+ uint32_t watermark_change_cntl;
+ uint32_t dram_state_cntl;
};
struct hubbub_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index c4fbbf08ef86..a6dedf3c7d74 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -269,6 +269,7 @@ struct stream_encoder_funcs {
struct stream_encoder *enc, unsigned int pix_per_container);
void (*enable_fifo)(struct stream_encoder *enc);
void (*disable_fifo)(struct stream_encoder *enc);
+ void (*map_stream_to_link)(struct stream_encoder *enc, uint32_t stream_enc_inst, uint32_t link_enc_inst);
};
struct hpo_dp_stream_encoder_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 88ac723d10aa..02ff99f7bec2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -44,6 +44,138 @@ struct dc_virtual_addr_space_config;
struct dpp;
struct dce_hwseq;
struct link_resource;
+struct dc_dmub_cmd;
+
+struct subvp_pipe_control_lock_fast_params {
+ struct dc *dc;
+ bool lock;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct pipe_control_lock_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ bool lock;
+};
+
+struct set_flip_control_gsl_params {
+ struct pipe_ctx *pipe_ctx;
+ bool flip_immediate;
+};
+
+struct program_triplebuffer_params {
+ const struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ bool enableTripleBuffer;
+};
+
+struct update_plane_addr_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_input_transfer_func_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_plane_state *plane_state;
+};
+
+struct program_gamut_remap_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_manual_trigger_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct send_dmcub_cmd_params {
+ struct dc_context *ctx;
+ union dmub_rb_cmd *cmd;
+ enum dm_dmub_wait_type wait_type;
+};
+
+struct setup_dpp_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_bias_and_scale_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_output_transfer_func_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ const struct dc_stream_state *stream;
+};
+
+struct update_visual_confirm_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ int mpcc_id;
+};
+
+struct power_on_mpc_mem_pwr_params {
+ struct mpc *mpc;
+ int mpcc_id;
+ bool power_on;
+};
+
+struct set_output_csc_params {
+ struct mpc *mpc;
+ int opp_id;
+ const uint16_t *regval;
+ enum mpc_output_csc_mode ocsc_mode;
+};
+
+struct set_ocsc_default_params {
+ struct mpc *mpc;
+ int opp_id;
+ enum dc_color_space color_space;
+ enum mpc_output_csc_mode ocsc_mode;
+};
+
+union block_sequence_params {
+ struct update_plane_addr_params update_plane_addr_params;
+ struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
+ struct pipe_control_lock_params pipe_control_lock_params;
+ struct set_flip_control_gsl_params set_flip_control_gsl_params;
+ struct program_triplebuffer_params program_triplebuffer_params;
+ struct set_input_transfer_func_params set_input_transfer_func_params;
+ struct program_gamut_remap_params program_gamut_remap_params;
+ struct program_manual_trigger_params program_manual_trigger_params;
+ struct send_dmcub_cmd_params send_dmcub_cmd_params;
+ struct setup_dpp_params setup_dpp_params;
+ struct program_bias_and_scale_params program_bias_and_scale_params;
+ struct set_output_transfer_func_params set_output_transfer_func_params;
+ struct update_visual_confirm_params update_visual_confirm_params;
+ struct power_on_mpc_mem_pwr_params power_on_mpc_mem_pwr_params;
+ struct set_output_csc_params set_output_csc_params;
+ struct set_ocsc_default_params set_ocsc_default_params;
+};
+
+enum block_sequence_func {
+ DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST = 0,
+ OPTC_PIPE_CONTROL_LOCK,
+ HUBP_SET_FLIP_CONTROL_GSL,
+ HUBP_PROGRAM_TRIPLEBUFFER,
+ HUBP_UPDATE_PLANE_ADDR,
+ DPP_SET_INPUT_TRANSFER_FUNC,
+ DPP_PROGRAM_GAMUT_REMAP,
+ OPTC_PROGRAM_MANUAL_TRIGGER,
+ DMUB_SEND_DMCUB_CMD,
+ DPP_SETUP_DPP,
+ DPP_PROGRAM_BIAS_AND_SCALE,
+ DPP_SET_OUTPUT_TRANSFER_FUNC,
+ MPC_UPDATE_VISUAL_CONFIRM,
+ MPC_POWER_ON_MPC_MEM_PWR,
+ MPC_SET_OUTPUT_CSC,
+ MPC_SET_OCSC_DEFAULT,
+};
+
+struct block_sequence {
+ union block_sequence_params params;
+ enum block_sequence_func func;
+};
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
@@ -252,12 +384,12 @@ struct hw_sequencer_funcs {
const struct tg_color *solid_color,
int width, int height, int offset);
+ void (*subvp_pipe_control_lock_fast)(union block_sequence_params *params);
void (*z10_restore)(const struct dc *dc);
void (*z10_save_init)(struct dc *dc);
void (*update_visual_confirm_color)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
- struct tg_color *color,
int mpcc_id);
void (*update_phantom_vp_position)(struct dc *dc,
@@ -294,6 +426,7 @@ void get_surface_visual_confirm_color(
void get_subvp_visual_confirm_color(
struct dc *dc,
+ struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
@@ -306,4 +439,36 @@ void get_mpctree_visual_confirm_color(
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+
+void get_mclk_switch_visual_confirm_color(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
+void hwss_execute_sequence(struct dc *dc,
+ struct block_sequence block_sequence[],
+ int num_steps);
+
+void hwss_build_fast_sequence(struct dc *dc,
+ struct dc_dmub_cmd *dc_dmub_cmd,
+ unsigned int dmub_cmd_count,
+ struct block_sequence block_sequence[],
+ int *num_steps,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_send_dmcub_cmd(union block_sequence_params *params);
+
+void hwss_program_manual_trigger(union block_sequence_params *params);
+
+void hwss_setup_dpp(union block_sequence_params *params);
+
+void hwss_program_bias_and_scale(union block_sequence_params *params);
+
+void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params);
+
+void hwss_set_output_csc(union block_sequence_params *params);
+
+void hwss_set_ocsc_default(union block_sequence_params *params);
+
#endif /* __DC_HW_SEQUENCER_H__ */
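The block_sequence additions above give the HW sequencer a data-driven fast path: hwss_build_fast_sequence() records each programming step as a function selector plus a params union, and hwss_execute_sequence() later replays the recorded steps in order. A toy model of that pattern (every name and field below is invented for illustration, not the driver's API):

#include <stdio.h>

/* Illustrative stand-ins for the per-step parameter structs. */
struct lock_params { int pipe; int lock; };
struct flip_params { int pipe; unsigned long addr; };

union step_params {
	struct lock_params lock;
	struct flip_params flip;
};

enum step_func { STEP_PIPE_LOCK, STEP_PLANE_FLIP };

struct step {
	union step_params params;
	enum step_func func;
};

static void do_pipe_lock(union step_params *p)
{
	printf("pipe %d: %s\n", p->lock.pipe, p->lock.lock ? "lock" : "unlock");
}

static void do_plane_flip(union step_params *p)
{
	printf("pipe %d: flip to 0x%lx\n", p->flip.pipe, p->flip.addr);
}

/* Replay the recorded steps in order, as hwss_execute_sequence() does. */
static void execute_sequence(struct step *seq, int num_steps)
{
	for (int i = 0; i < num_steps; i++) {
		switch (seq[i].func) {
		case STEP_PIPE_LOCK:
			do_pipe_lock(&seq[i].params);
			break;
		case STEP_PLANE_FLIP:
			do_plane_flip(&seq[i].params);
			break;
		}
	}
}

int main(void)
{
	/* Build lock -> flip -> unlock up front, then run it back to back. */
	struct step seq[3];
	int n = 0;

	seq[n].func = STEP_PIPE_LOCK;
	seq[n].params.lock.pipe = 0;
	seq[n].params.lock.lock = 1;
	n++;

	seq[n].func = STEP_PLANE_FLIP;
	seq[n].params.flip.pipe = 0;
	seq[n].params.flip.addr = 0x100000;
	n++;

	seq[n].func = STEP_PIPE_LOCK;
	seq[n].params.lock.pipe = 0;
	seq[n].params.lock.lock = 0;
	n++;

	execute_sequence(seq, n);
	return 0;
}

The driver-side union carries real hardware arguments (pipe_ctx, mpc, DMUB commands and so on), and dc_state reserves room for 50 steps and 10 DMUB commands per update, as added in core_types.h above.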
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 4513544559be..4ca4192c1e12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -160,6 +160,8 @@ struct hwseq_private_funcs {
unsigned int *k1_div,
unsigned int *k2_div);
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
+ void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
+ struct dc_state *context);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index f839494d59d8..e3e8c76c17cf 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -179,6 +179,10 @@ struct link_service {
int (*aux_transfer_raw)(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
+ bool (*configure_fixed_vs_pe_retimer)(
+ struct ddc_service *ddc,
+ const uint8_t *data,
+ uint32_t len);
bool (*aux_transfer_with_retries_no_mutex)(struct ddc_service *ddc,
struct aux_payload *payload);
bool (*is_in_aux_transaction_mode)(struct ddc_service *ddc);
@@ -269,6 +273,20 @@ struct link_service {
uint16_t psr_vtotal_su);
void (*edp_get_psr_residency)(
const struct dc_link *link, uint32_t *residency);
+
+ bool (*edp_get_replay_state)(
+ const struct dc_link *link, uint64_t *state);
+ bool (*edp_set_replay_allow_active)(struct dc_link *dc_link,
+ const bool *enable, bool wait, bool force_static,
+ const unsigned int *power_opts);
+ bool (*edp_setup_replay)(struct dc_link *link,
+ const struct dc_stream_state *stream);
+ bool (*edp_set_coasting_vtotal)(
+ struct dc_link *link, uint16_t coasting_vtotal);
+ bool (*edp_replay_residency)(const struct dc_link *link,
+ unsigned int *residency, const bool is_start,
+ const bool is_alpm);
+
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index eaeb684c8a48..e546b9c506c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -142,10 +142,6 @@ struct clock_source *dc_resource_find_first_free_pll(
struct resource_context *res_ctx,
const struct resource_pool *pool);
-struct pipe_ctx *resource_get_head_pipe_for_stream(
- struct resource_context *res_ctx,
- struct dc_stream_state *stream);
-
bool resource_attach_surfaces_to_context(
struct dc_plane_state *const *plane_state,
int surface_count,
@@ -153,11 +149,232 @@ bool resource_attach_surfaces_to_context(
struct dc_state *context,
const struct resource_pool *pool);
-struct pipe_ctx *find_idle_secondary_pipe(
+#define FREE_PIPE_INDEX_NOT_FOUND -1
+
+/*
+ * pipe types are identified based on MUXes in DCN front end that are capable
+ * of taking input from one DCN pipeline to another DCN pipeline. The name is
+ * in a form of XXXX_YYYY, where XXXX is the DCN front end hardware block the
+ * pipeline ends with and YYYY is the rendering role that the pipe is in.
+ *
+ * For instance OTG_MASTER is a pipe ending with OTG hardware block in its
+ * pipeline and it is in a role of a master pipe for timing generation.
+ *
+ * For quick reference a diagram of each pipe type's areas of responsibility
+ * for outputting timings on the screen is shown below:
+ *
+ * Timing Active for Stream 0
+ * __________________________________________________
+ * |OTG master 0 (OPP head 0)|OPP head 2 (DPP pipe 2) |
+ * | (DPP pipe 0)| |
+ * | Top Plane 0 | |
+ * | ______________|____ |
+ * | |DPP pipe 1 |DPP | |
+ * | | |pipe| |
+ * | | Bottom |3 | |
+ * | | Plane 1 | | |
+ * | | | | |
+ * | |______________|____| |
+ * | | |
+ * | | |
+ * | ODM slice 0 | ODM slice 1 |
+ * |_________________________|________________________|
+ *
+ * Timing Active for Stream 1
+ * __________________________________________________
+ * |OTG master 4 (OPP head 4) |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | Blank Pixel Data |
+ * | (generated by DPG4) |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * |__________________________________________________|
+ *
+ * Inter-pipe Relation
+ * __________________________________________________
+ * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
+ * | | plane 0 | slice 0 | |
+ * | 0 | -------------MPC---------ODM----------- |
+ * | | plane 1 | | | | |
+ * | 1 | ------------- | | | |
+ * | | plane 0 | slice 1 | | |
+ * | 2 | -------------MPC--------- | |
+ * | | plane 1 | | | |
+ * | 3 | ------------- | | |
+ * | | | blank | |
+ * | 4 | | ----------------------- |
+ * | | | | |
+ * | 5 | (FREE) | | |
+ * |________|_______________|___________|_____________|
+ */
+enum pipe_type {
+ /* free pipe - free pipe is an uninitialized pipe without a stream
+ * associated with it. It is a free DCN pipe resource. It can be
+ * acquired as any type of pipe.
+ */
+ FREE_PIPE,
+
+ /* OTG master pipe - the master pipe of its OPP head pipes with a
+ * functional OTG. It merges all its OPP head pipes pixel data in ODM
+ * block and output to backend DIG. OTG master pipe is responsible for
+ * generating entire crtc timing to backend DIG. An OTG master pipe may
+ * or may not have a plane. If it has a plane it blends it as the left
+ * most MPC slice of the top most layer. If it doesn't have a plane it
+ * can output pixel data from its OPP head pipes' test pattern
+ * generators (DPG) such as solid black pixel data to blank the screen.
+ */
+ OTG_MASTER,
+
+ /* OPP head pipe - the head pipe of an MPC blending tree with a
+ * functional OPP outputting to an OTG. OPP head pipe is responsible for
+ * processing output pixels in its own ODM slice. It may or may not have
+ * a plane. If it has a plane it blends it as the top most layer within
+ * its own ODM slice. If it doesn't have a plane it can output pixel
+ * data from its DPG such as solid black pixel data to blank the pixel
+ * data in its own ODM slice. OTG master pipe is also an OPP head pipe
+ * but with more responsibility.
+ */
+ OPP_HEAD,
+
+ /* DPP pipe - the pipe with a functional DPP outputting to an OPP head
+ * pipe's MPC. DPP pipe is responsible for processing pixel data from
+ * its own MPC slice of a plane. It must be connected to an OPP head
+ * pipe and it must have a plane associated with it.
+ */
+ DPP_PIPE,
+};
+
+/*
+ * Determine if the input pipe ctx is of a pipe type.
+ * return - true if pipe ctx is of the input type.
+ */
+bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type);
+
+/*
+ * Determine if the input pipe ctx is used for rendering a plane with MPCC
+ * combine. MPCC combine is a hardware feature to combine multiple DPP pipes
+ * into a single plane. It is typically used for bypassing pipe bandwidth
+ * limitation for rendering a very large plane or saving power by reducing UCLK
+ * and DPPCLK speeds.
+ *
+ * For instance in the Inter-pipe Relation diagram shown below, both PIPE 0 and
+ * 1 are for MPCC combine for plane 0
+ *
+ * Inter-pipe Relation
+ * __________________________________________________
+ * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
+ * | | plane 0 | | |
+ * | 0 | -------------MPC----------------------- |
+ * | | plane 0 | | | |
+ * | 1 | ------------- | | |
+ * |________|_______________|___________|_____________|
+ *
+ * return - true if pipe ctx is used for mpcc combine.
+ */
+bool resource_is_for_mpcc_combine(const struct pipe_ctx *pipe_ctx);
+
+/*
+ * Look for a free pipe in new resource context that is used as a secondary DPP
+ * pipe in MPC blending tree associated with input OPP head pipe.
+ *
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct pipe_ctx *cur_opp_head);
+
+/*
+ * Look for a free pipe in new resource context that is not used in current
+ * resource context.
+ *
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int recource_find_free_pipe_not_used_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for a free pipe in new resource context that is used as a secondary DPP
+ * pipe in any MPCC combine in current resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Look for any free pipe in new resource context.
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
+ * Legacy logic for finding a free secondary pipe, deprecated for newer DCNs as
+ * it doesn't find the most optimal free pipe needed to prevent time-consuming
+ * hardware state transitions.
+ */
+struct pipe_ctx *resource_find_free_secondary_pipe_legacy(
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe);
+/*
+ * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
+ * count is equal to MPC splits + 1. For example, if a plane is cut 3 times, it
+ * will have 4 slices.
+ * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
+ * the number of MPC "cuts" for the plane.
+ */
+int resource_get_num_mpc_splits(const struct pipe_ctx *pipe);
+
+/*
+ * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
+ * count is equal to ODM splits + 1. For example, if a timing is cut 3 times, it
+ * will have 4 slices.
+ * return - 0 if pipe is not used for ODM combine. otherwise
+ * the number of ODM "cuts" for the timing.
+ */
+int resource_get_num_odm_splits(const struct pipe_ctx *pipe);
+
+/*
+ * Get the OTG master pipe in resource context associated with the stream.
+ * return - NULL if not found. Otherwise the OTG master pipe associated with the
+ * stream.
+ */
+struct pipe_ctx *resource_get_otg_master_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream);
+
+/*
+ * Get the OTG master pipe for the input pipe context.
+ * return - the OTG master pipe for the input pipe
+ * context.
+ */
+struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx);
+
+/*
+ * Get the OPP head pipe for the input pipe context.
+ * return - the OPP head pipe for the input pipe
+ * context.
+ */
+struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx);
+
+
bool resource_validate_attach_surfaces(
const struct dc_validation_set set[],
int set_count,
@@ -193,10 +410,6 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
-int get_num_mpc_splits(struct pipe_ctx *pipe);
-
-int get_num_odm_splits(struct pipe_ctx *pipe);
-
bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
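The comments above define the pipe taxonomy the new resource helpers work with. As a rough mental model: a pipe with a stream but no top_pipe and no prev_odm_pipe is the OTG master, a pipe with no top_pipe heads the MPC tree of its ODM slice, and a pipe with a top_pipe is a secondary DPP pipe. The toy classification below follows that reading; the driver's resource_is_pipe_type() works on pipe_ctx stream/plane state and resource assignments, so these fields and checks are only a simplified stand-in.

#include <stdio.h>
#include <stddef.h>

/* Toy pipe model for the taxonomy described above (simplified stand-in). */
struct toy_pipe {
	int has_stream;                 /* stream assigned to this pipe */
	int has_plane;                  /* plane blended by this pipe */
	struct toy_pipe *top_pipe;      /* MPC: pipe layered above us, if any */
	struct toy_pipe *prev_odm_pipe; /* ODM: slice to our left, if any */
};

static const char *classify(const struct toy_pipe *p)
{
	if (!p->has_stream && !p->has_plane)
		return "FREE_PIPE";
	if (!p->top_pipe && !p->prev_odm_pipe)
		return "OTG_MASTER"; /* owns the timing; also an OPP head */
	if (!p->top_pipe)
		return "OPP_HEAD";   /* heads the MPC tree of its ODM slice */
	return "DPP_PIPE";           /* secondary pipe feeding an OPP head's MPC */
}

int main(void)
{
	struct toy_pipe otg      = { 1, 1, NULL, NULL };
	struct toy_pipe opp_head = { 1, 1, NULL, &otg };
	struct toy_pipe dpp      = { 1, 1, &otg, NULL };
	struct toy_pipe idle     = { 0, 0, NULL, NULL };

	printf("%s %s %s %s\n", classify(&otg), classify(&opp_head),
	       classify(&dpp), classify(&idle));
	return 0;
}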
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index c923b2af8510..37bc98faa7a0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -38,10 +38,9 @@
#define DCN_BASE__INST0_SEG2 0x000034C0
-static enum dc_irq_source to_dal_irq_source_dcn314(
- struct irq_service *irq_service,
- uint32_t src_id,
- uint32_t ext_id)
+static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
{
switch (src_id) {
case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
index a52b56e2859e..6af8a97d4a77 100644
--- a/drivers/gpu/drm/amd/display/dc/link/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -42,7 +42,8 @@ AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES)
###############################################################################
# hwss
###############################################################################
-LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o
+LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \
+link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o
AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \
$(LINK_HWSS))
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index db9f1baa27e5..fe4282771cd0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -428,15 +428,24 @@ static void set_crtc_test_pattern(struct dc_link *link,
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
- int width = pipe_ctx->stream->timing.h_addressable +
+ struct pipe_ctx *odm_pipe;
+ int odm_cnt = 1;
+ int h_active = pipe_ctx->stream->timing.h_addressable +
pipe_ctx->stream->timing.h_border_left +
pipe_ctx->stream->timing.h_border_right;
- int height = pipe_ctx->stream->timing.v_addressable +
+ int v_active = pipe_ctx->stream->timing.v_addressable +
pipe_ctx->stream->timing.v_border_bottom +
pipe_ctx->stream->timing.v_border_top;
+ int odm_slice_width, last_odm_slice_width, offset = 0;
memset(&params, 0, sizeof(params));
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_cnt++;
+
+ odm_slice_width = h_active / odm_cnt;
+ last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);
+
switch (test_pattern) {
case DP_TEST_PATTERN_COLOR_SQUARES:
controller_test_pattern =
@@ -473,16 +482,13 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
+ } else if (link->dc->hwss.set_disp_pattern_generator) {
enum controller_dp_color_space controller_color_space;
- int opp_cnt = 1;
- int offset = 0;
- int dpg_width = width;
+ struct output_pixel_processor *odm_opp;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -502,24 +508,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
break;
}
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
- dpg_width = width / opp_cnt;
- offset = dpg_width;
-
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
+ odm_pipe = pipe_ctx;
+ while (odm_pipe->next_odm_pipe) {
+ odm_opp = odm_pipe->stream_res.opp;
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
odm_pipe,
@@ -527,11 +518,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
controller_color_space,
color_depth,
NULL,
- dpg_width,
- height,
+ odm_slice_width,
+ v_active,
offset);
- offset += offset;
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ odm_opp = odm_pipe->stream_res.opp;
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ last_odm_slice_width,
+ v_active,
+ offset);
}
}
break;
@@ -540,23 +543,17 @@ static void set_crtc_test_pattern(struct dc_link *link,
/* restore bitdepth reduction */
resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) {
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
- int dpg_width;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- dpg_width = width / opp_cnt;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ } else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct output_pixel_processor *odm_opp;
+ odm_pipe = pipe_ctx;
+ while (odm_pipe->next_odm_pipe) {
+ odm_opp = odm_pipe->stream_res.opp;
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
odm_pipe,
@@ -564,19 +561,23 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- dpg_width,
- height,
- 0);
+ odm_slice_width,
+ v_active,
+ offset);
+ offset += odm_slice_width;
+ odm_pipe = odm_pipe->next_odm_pipe;
}
+ odm_opp = odm_pipe->stream_res.opp;
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
+ odm_pipe,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- dpg_width,
- height,
- 0);
+ last_odm_slice_width,
+ v_active,
+ offset);
}
}
break;
@@ -674,7 +675,8 @@ bool dp_set_test_pattern(
if (pipes[i].stream == NULL)
continue;
- if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+ if (resource_is_pipe_type(&pipes[i], OTG_MASTER) &&
+ pipes[i].stream->link == link) {
pipe_ctx = &pipes[i];
break;
}
@@ -702,6 +704,7 @@ bool dp_set_test_pattern(
/* Reset Test Pattern state */
link->test_pattern_enabled = false;
+ link->current_test_pattern = test_pattern;
return true;
}
@@ -739,6 +742,7 @@ bool dp_set_test_pattern(
if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
/* Set Test Pattern state */
link->test_pattern_enabled = true;
+ link->current_test_pattern = test_pattern;
if (p_link_settings != NULL)
dpcd_set_link_settings(link,
p_link_settings);
@@ -937,6 +941,7 @@ bool dp_set_test_pattern(
/* Set Test Pattern state */
link->test_pattern_enabled = true;
+ link->current_test_pattern = test_pattern;
}
return true;
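The test-pattern rework above programs one pattern-generator region per ODM pipe instead of a single full-width region: every slice is h_active / odm_cnt pixels wide, the last slice absorbs the remainder, and the x offset advances by one slice width per pipe. A quick sketch of the split with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Made-up case: 1922 active pixels split across 3 ODM slices. */
	int h_active = 1922, odm_cnt = 3, offset = 0;
	int odm_slice_width = h_active / odm_cnt;
	int last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1);

	for (int i = 0; i < odm_cnt; i++) {
		int w = (i == odm_cnt - 1) ? last_odm_slice_width : odm_slice_width;

		printf("slice %d: width %d, x offset %d\n", i, w, offset);
		offset += odm_slice_width;
	}
	return 0;
}

Here the first two slices are 640 pixels wide at offsets 0 and 640, and the last slice is 642 pixels wide at offset 1280, matching how the last pattern-generator call uses last_odm_slice_width and the accumulated offset.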
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index bebf9c4c8702..1328a0ade342 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -46,6 +46,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+ if (stream_enc->funcs->map_stream_to_link)
+ stream_enc->funcs->map_stream_to_link(stream_enc,
+ stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
if (stream_enc->funcs->enable_fifo)
stream_enc->funcs->enable_fifo(stream_enc);
}
@@ -163,7 +166,7 @@ void set_dio_dp_lane_settings(struct dc_link *link,
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
-static void update_dio_stream_allocation_table(struct dc_link *link,
+void update_dio_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 8b8a099feeb0..f4633d3cf9b9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -55,5 +55,8 @@ void setup_dio_audio_output(struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output, uint32_t audio_inst);
void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx);
void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx);
+void update_dio_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table);
#endif /* __LINK_HWSS_DIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
new file mode 100644
index 000000000000..b659baa23147
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_dio.h"
+#include "link_hwss_dio_fixed_vs_pe_retimer.h"
+#include "link_enc_cfg.h"
+
+uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link)
+{
+ // TODO: Get USB-C cable orientation
+ if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
+ return 0xF2;
+ else
+ return 0x12;
+}
+
+void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link)
+{
+ const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link);
+ const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+ const uint8_t vendor_lttpr_exit_manual_automation_1[4] = {0x1, 0x50, dp_type, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_2[4] = {0x1, 0x50, 0x50, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_3[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_4[4] = {0x1, 0x10, 0x58, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_5[4] = {0x1, 0x10, 0x59, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_6[4] = {0x1, 0x30, 0x51, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_7[4] = {0x1, 0x30, 0x52, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_8[4] = {0x1, 0x30, 0x54, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_9[4] = {0x1, 0x30, 0x55, 0x0};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_1[0], sizeof(vendor_lttpr_exit_manual_automation_1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_2[0], sizeof(vendor_lttpr_exit_manual_automation_2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_3[0], sizeof(vendor_lttpr_exit_manual_automation_3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_4[0], sizeof(vendor_lttpr_exit_manual_automation_4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_5[0], sizeof(vendor_lttpr_exit_manual_automation_5));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_6[0], sizeof(vendor_lttpr_exit_manual_automation_6));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_7[0], sizeof(vendor_lttpr_exit_manual_automation_7));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_8[0], sizeof(vendor_lttpr_exit_manual_automation_8));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_9[0], sizeof(vendor_lttpr_exit_manual_automation_9));
+}
+
+static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_link *link,
+ const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params,
+ const struct link_hwss *link_hwss)
+{
+ struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
+ const uint8_t pltpat_custom[10] = {0x1F, 0x7C, 0xF0, 0xC1, 0x07, 0x1F, 0x7C, 0xF0, 0xC1, 0x07};
+ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
+ const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+
+
+ if (tp_params == NULL)
+ return false;
+
+ if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
+ link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) {
+ // Deprogram overrides from previous test pattern
+ dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
+ }
+
+ switch (tp_params->dp_phy_pattern) {
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern,
+ pltpat_custom, tp_params->custom_pattern_size) != 0)
+ return false;
+ break;
+ case DP_TEST_PATTERN_D102:
+ break;
+ default:
+ if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
+ link->current_test_pattern == DP_TEST_PATTERN_D102)
+ // Deprogram overrides from previous test pattern
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_0[0],
+ sizeof(vendor_lttpr_exit_manual_automation_0));
+
+ return false;
+ }
+
+ hw_tp_params.dp_phy_pattern = tp_params->dp_phy_pattern;
+ hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode;
+
+ if (link_hwss->ext.set_dp_link_test_pattern)
+ link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params);
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0));
+
+ return true;
+}
+
+static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(
+ link, link_res, tp_params, get_dio_link_hwss())) {
+ link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
+ }
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link)
+{
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
+}
+
+static void enable_dio_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ if (link_settings->lane_count == LANE_COUNT_FOUR)
+ enable_dio_fixed_vs_pe_retimer_program_4lane_output(link);
+
+ enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings);
+}
+
+static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
+ .setup_stream_encoder = setup_dio_stream_encoder,
+ .reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
+ .setup_audio_output = setup_dio_audio_output,
+ .enable_audio_packet = enable_dio_audio_packet,
+ .disable_audio_packet = disable_dio_audio_packet,
+ .ext = {
+ .set_throttled_vcp_size = set_dio_throttled_vcp_size,
+ .enable_dp_link_output = enable_dio_fixed_vs_pe_retimer_dp_link_output,
+ .set_dp_link_test_pattern = set_dio_fixed_vs_pe_retimer_dp_link_test_pattern,
+ .set_dp_lane_settings = set_dio_dp_lane_settings,
+ .update_stream_allocation_table = update_dio_stream_allocation_table,
+ },
+};
+
+bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
+{
+ if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
+ return false;
+
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
+ return true;
+}
+
+const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
+{
+ return &dio_fixed_vs_pe_retimer_link_hwss;
+}
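(For illustration only, not part of the diff.) A minimal sketch of how a caller could prefer the retimer-aware DIO link_hwss using only the helpers added above; the actual selection site is outside this hunk, so the wrapper name below is hypothetical:

static const struct link_hwss *pick_dio_link_hwss(const struct dc_link *link)
{
	/* use the fixed VS/PE retimer variant only when the link requires it */
	if (requires_fixed_vs_pe_retimer_dio_link_hwss(link))
		return get_dio_fixed_vs_pe_retimer_link_hwss();

	/* otherwise fall back to the plain DIO implementation */
	return get_dio_link_hwss();
}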
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
new file mode 100644
index 000000000000..9ac08a332540
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
+#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
+
+#include "link.h"
+
+uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
+uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
+void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link);
+void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link);
+bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link);
+const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void);
+
+#endif /* __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index edd7d026a762..e1257404357b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -28,25 +28,7 @@
#include "dccg.h"
#include "clk_mgr.h"
-static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
-{
- switch (link->link_enc->transmitter) {
- case TRANSMITTER_UNIPHY_A:
- return PHYD32CLKA;
- case TRANSMITTER_UNIPHY_B:
- return PHYD32CLKB;
- case TRANSMITTER_UNIPHY_C:
- return PHYD32CLKC;
- case TRANSMITTER_UNIPHY_D:
- return PHYD32CLKD;
- case TRANSMITTER_UNIPHY_E:
- return PHYD32CLKE;
- default:
- return PHYD32CLKA;
- }
-}
-
-static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size)
{
struct hpo_dp_stream_encoder *hpo_dp_stream_encoder =
@@ -59,7 +41,7 @@ static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
throttled_vcp_size);
}
-static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
+void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
const struct dc_link_settings *link_settings,
struct fixed31_32 throttled_vcp_size)
{
@@ -87,7 +69,7 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
hblank_min_symbol_width);
}
-static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
+void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
@@ -96,14 +78,14 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}
-static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
+void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
stream_enc->funcs->disable(stream_enc);
}
-static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
+void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
{
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -120,81 +102,36 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
-static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
+void enable_hpo_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- const struct dc *dc = link->dc;
- enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(link);
- int phyd32clk_freq_khz = link_settings->link_rate == LINK_RATE_UHBR10 ? 312500 :
- link_settings->link_rate == LINK_RATE_UHBR13_5 ? 412875 :
- link_settings->link_rate == LINK_RATE_UHBR20 ? 625000 : 0;
-
- dm_set_phyd32clk(dc->ctx, phyd32clk_freq_khz);
- dc->res_pool->dccg->funcs->set_physymclk(
- dc->res_pool->dccg,
- link->link_enc_hw_inst,
- PHYSYMCLK_FORCE_SRC_PHYD32CLK,
- true);
- dc->res_pool->dccg->funcs->enable_symclk32_le(
- dc->res_pool->dccg,
- link_res->hpo_dp_link_enc->inst,
- phyd32clk);
- link_res->hpo_dp_link_enc->funcs->link_enable(
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+ link_res->hpo_dp_link_enc->inst,
+ true);
+ link_res->hpo_dp_link_enc->funcs->enable_link_phy(
link_res->hpo_dp_link_enc,
- link_settings->lane_count);
-
-}
-
-static void enable_hpo_dp_link_output(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal,
- enum clock_source_id clock_source,
- const struct dc_link_settings *link_settings)
-{
- if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment))
- enable_hpo_dp_fpga_link_output(link, link_res, signal,
- clock_source, link_settings);
- else
- link_res->hpo_dp_link_enc->funcs->enable_link_phy(
- link_res->hpo_dp_link_enc,
- link_settings,
- link->link_enc->transmitter,
- link->link_enc->hpd_source);
-}
-
-
-static void disable_hpo_dp_fpga_link_output(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal)
-{
- const struct dc *dc = link->dc;
-
- link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
- dc->res_pool->dccg->funcs->disable_symclk32_le(
- dc->res_pool->dccg,
- link_res->hpo_dp_link_enc->inst);
- dc->res_pool->dccg->funcs->set_physymclk(
- dc->res_pool->dccg,
- link->link_enc_hw_inst,
- PHYSYMCLK_FORCE_SRC_SYMCLK,
- false);
- dm_set_phyd32clk(dc->ctx, 0);
+ link_settings,
+ link->link_enc->transmitter,
+ link->link_enc->hpd_source);
}
-static void disable_hpo_dp_link_output(struct dc_link *link,
+void disable_hpo_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
- if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) {
- disable_hpo_dp_fpga_link_output(link, link_res, signal);
- } else {
link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
link_res->hpo_dp_link_enc->funcs->disable_link_phy(
link_res->hpo_dp_link_enc, signal);
- }
+ if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating)
+ link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating(
+ link->dc->res_pool->dccg,
+ link_res->hpo_dp_link_enc->inst,
+ false);
}
static void set_hpo_dp_link_test_pattern(struct dc_link *link,
@@ -217,7 +154,7 @@ static void set_hpo_dp_lane_settings(struct dc_link *link,
lane_settings[0].FFE_PRESET.raw);
}
-static void update_hpo_dp_stream_allocation_table(struct dc_link *link,
+void update_hpo_dp_stream_allocation_table(struct dc_link *link,
const struct link_resource *link_res,
const struct link_mst_stream_allocation_table *table)
{
@@ -226,7 +163,7 @@ static void update_hpo_dp_stream_allocation_table(struct dc_link *link,
table);
}
-static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
+void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output, uint32_t audio_inst)
{
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
@@ -235,13 +172,13 @@ static void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
&pipe_ctx->stream->audio_info);
}
-static void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
+void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
{
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(
pipe_ctx->stream_res.hpo_dp_stream_enc);
}
-static void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
+void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 3cbb94b41a23..1d3ed8ca83b5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -28,9 +28,35 @@
#include "link_hwss.h"
#include "link.h"
+void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+ struct fixed31_32 throttled_vcp_size);
+void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
+ const struct dc_link_settings *link_settings,
+ struct fixed31_32 throttled_vcp_size);
+void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx);
+void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx);
+void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx);
+void enable_hpo_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+void disable_hpo_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+void update_hpo_dp_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table);
+void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output, uint32_t audio_inst);
+void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx);
+void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx);
+const struct link_hwss *get_hpo_dp_link_hwss(void);
bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
-const struct link_hwss *get_hpo_dp_link_hwss(void);
#endif /* __LINK_HWSS_HPO_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
new file mode 100644
index 000000000000..b621b97711b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_hpo_dp.h"
+#include "link_hwss_hpo_fixed_vs_pe_retimer_dp.h"
+#include "link_hwss_dio_fixed_vs_pe_retimer.h"
+
+static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
+ const struct dc_lane_settings *hw_lane_settings)
+{
+ const uint8_t vendor_ffe_preset_table[16] = {
+ 0x01, 0x41, 0x61, 0x81,
+ 0xB1, 0x05, 0x35, 0x65,
+ 0x85, 0xA5, 0x09, 0x39,
+ 0x59, 0x89, 0x0F, 0x24};
+
+ const uint8_t ffe_mask[4] = {
+ (hw_lane_settings[0].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[0].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
+ (hw_lane_settings[1].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[1].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
+ (hw_lane_settings[2].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[2].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF),
+ (hw_lane_settings[3].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF)
+ & (hw_lane_settings[3].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF)};
+
+ const uint8_t ffe_cfg[4] = {
+ vendor_ffe_preset_table[hw_lane_settings[0].FFE_PRESET.settings.level] & ffe_mask[0],
+ vendor_ffe_preset_table[hw_lane_settings[1].FFE_PRESET.settings.level] & ffe_mask[1],
+ vendor_ffe_preset_table[hw_lane_settings[2].FFE_PRESET.settings.level] & ffe_mask[2],
+ vendor_ffe_preset_table[hw_lane_settings[3].FFE_PRESET.settings.level] & ffe_mask[3]};
+
+ const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link);
+
+ const uint8_t vendor_lttpr_write_data_ffe1[4] = {0x01, 0x50, dp_type, 0x0F};
+ const uint8_t vendor_lttpr_write_data_ffe2[4] = {0x01, 0x55, dp_type, ffe_cfg[0]};
+ const uint8_t vendor_lttpr_write_data_ffe3[4] = {0x01, 0x56, dp_type, ffe_cfg[1]};
+ const uint8_t vendor_lttpr_write_data_ffe4[4] = {0x01, 0x57, dp_type, ffe_cfg[2]};
+ const uint8_t vendor_lttpr_write_data_ffe5[4] = {0x01, 0x58, dp_type, ffe_cfg[3]};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe1[0], sizeof(vendor_lttpr_write_data_ffe1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe2[0], sizeof(vendor_lttpr_write_data_ffe2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe3[0], sizeof(vendor_lttpr_write_data_ffe3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe4[0], sizeof(vendor_lttpr_write_data_ffe4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_ffe5[0], sizeof(vendor_lttpr_write_data_ffe5));
+}
+
+static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0};
+ const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21};
+ const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21};
+ const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F};
+ const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20};
+ const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20};
+ const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 0x54, 0x20};
+ const uint8_t vendor_lttpr_write_data_pg10[4] = {0x1, 0x30, 0x55, 0x20};
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg1[0], sizeof(vendor_lttpr_write_data_pg1));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg2[0], sizeof(vendor_lttpr_write_data_pg2));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg3[0], sizeof(vendor_lttpr_write_data_pg3));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg4[0], sizeof(vendor_lttpr_write_data_pg4));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg5[0], sizeof(vendor_lttpr_write_data_pg5));
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg6[0], sizeof(vendor_lttpr_write_data_pg6));
+
+ if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg7[0], sizeof(vendor_lttpr_write_data_pg7));
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg8[0], sizeof(vendor_lttpr_write_data_pg8));
+
+ if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR)
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg9[0], sizeof(vendor_lttpr_write_data_pg9));
+
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pg10[0], sizeof(vendor_lttpr_write_data_pg10));
+}
+
+static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params,
+ const struct link_hwss *link_hwss)
+{
+ struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 };
+ const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06};
+
+ if (tp_params == NULL)
+ return false;
+
+ if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN ||
+ tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) {
+ // Deprogram overrides from previously set square wave override
+ if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM ||
+ link->current_test_pattern == DP_TEST_PATTERN_D102)
+ link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_exit_manual_automation_0[0],
+ sizeof(vendor_lttpr_exit_manual_automation_0));
+ else
+ dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link);
+
+ return false;
+ }
+
+ hw_tp_params.dp_phy_pattern = DP_TEST_PATTERN_PRBS31;
+ hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode;
+
+ if (link_hwss->ext.set_dp_link_test_pattern)
+ link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params);
+
+ dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params);
+
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]);
+
+ return true;
+}
+
+static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ if (!dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(
+ link, link_res, tp_params, get_hpo_dp_link_hwss())) {
+ link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link_res->hpo_dp_link_enc, tp_params);
+ }
+ link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ link_res->hpo_dp_link_enc->funcs->set_ffe(
+ link_res->hpo_dp_link_enc,
+ link_settings,
+ lane_settings[0].FFE_PRESET.raw);
+
+ // FFE is programmed when the retimer is programmed for SQ128, but explicit
+ // programming is needed here as well in case an FFE-only update is requested
+ if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN &&
+ link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END)
+ dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]);
+}
+
+static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ if (link_settings->lane_count == LANE_COUNT_FOUR)
+ enable_dio_fixed_vs_pe_retimer_program_4lane_output(link);
+
+ enable_hpo_dp_link_output(link, link_res, signal, clock_source, link_settings);
+}
+
+static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = {
+ .setup_stream_encoder = setup_hpo_dp_stream_encoder,
+ .reset_stream_encoder = reset_hpo_dp_stream_encoder,
+ .setup_stream_attribute = setup_hpo_dp_stream_attribute,
+ .disable_link_output = disable_hpo_dp_link_output,
+ .setup_audio_output = setup_hpo_dp_audio_output,
+ .enable_audio_packet = enable_hpo_dp_audio_packet,
+ .disable_audio_packet = disable_hpo_dp_audio_packet,
+ .ext = {
+ .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
+ .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
+ .enable_dp_link_output = enable_hpo_fixed_vs_pe_retimer_dp_link_output,
+ .set_dp_link_test_pattern = set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern,
+ .set_dp_lane_settings = set_hpo_fixed_vs_pe_retimer_dp_lane_settings,
+ .update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
+ },
+};
+
+bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link)
+{
+ if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
+ return false;
+
+ if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED)
+ return false;
+
+ return true;
+}
+
+const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void)
+{
+ return &hpo_fixed_vs_pe_retimer_dp_link_hwss;
+}
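(Editorial worked example, not part of the diff.) In dp_hpo_fixed_vs_pe_retimer_set_tx_ffe() above, each lane's byte is the vendor preset entry for the requested FFE level, with bits masked off when the lane's no_preshoot or no_deemphasis flag is set. For a lane requesting level 5 with no_preshoot = 1 and no_deemphasis = 0:

	uint8_t mask = 0xFF & 0xF1;                        /* no_deemphasis clear, no_preshoot set -> 0xF1 */
	uint8_t byte = vendor_ffe_preset_table[5] & mask;  /* 0x05 & 0xF1 = 0x01 */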
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
new file mode 100644
index 000000000000..82301187bc7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
+#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
+
+#include "link.h"
+
+bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
+const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
+
+#endif /* __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index a131e30fd7d6..c9b6676eaf53 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -593,6 +593,10 @@ static bool detect_dp(struct dc_link *link,
/* DP SST branch */
link->type = dc_connection_sst_branch;
} else {
+ if (link->dc->debug.disable_dp_plus_plus_wa &&
+ link->link_enc->features.flags.bits.IS_UHBR20_CAPABLE)
+ return false;
+
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
sink_caps,
@@ -872,8 +876,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_sink_ext_caps.bits.oled == 1)) {
dpcd_set_source_specific_data(link);
msleep(post_oui_delay);
- set_default_brightness_aux(link);
- //TODO: use cached
+ set_cached_brightness_aux(link);
}
return true;
@@ -980,6 +983,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_caps.dongle_type !=
DISPLAY_DONGLE_DP_HDMI_CONVERTER))
converter_disable_audio = true;
+
+ /* limit link rate to HBR3 for DPIA until USB4 V2 is implemented */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
+ link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 2267fb097830..79aef205598b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -182,11 +182,8 @@ void link_resume(struct dc_link *link)
static bool is_master_pipe_for_link(const struct dc_link *link,
const struct pipe_ctx *pipe)
{
- return (pipe->stream &&
- pipe->stream->link &&
- pipe->stream->link == link &&
- pipe->top_pipe == NULL &&
- pipe->prev_odm_pipe == NULL);
+ return resource_is_pipe_type(pipe, OTG_MASTER) &&
+ pipe->stream->link == link;
}
/*
@@ -765,7 +762,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
struct dc_stream_state *stream = pipe_ctx->stream;
bool result = false;
- if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ if (dc_is_virtual_signal(stream->signal))
result = true;
else
result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
@@ -778,7 +775,6 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
@@ -816,8 +812,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
/* Enable DSC in encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
- && !dp_is_128b_132b_signal(pipe_ctx)) {
+ if (dc_is_dp_signal(stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) {
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -849,7 +844,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
false,
NULL,
true);
- else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ else {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
pipe_ctx->stream_res.stream_enc,
OPTC_DSC_DISABLED, 0, 0);
@@ -1081,8 +1076,14 @@ static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
{
uint64_t kbps;
+ enum dc_link_encoding_format link_encoding;
+
+ if (dp_is_128b_132b_signal(pipe_ctx))
+ link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+ else
+ link_encoding = DC_LINK_ENCODING_DP_8b_10b;
- kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding);
return get_pbn_from_bw_in_kbps(kbps);
}
@@ -1540,7 +1541,8 @@ struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
struct fixed31_32 timing_bw =
dc_fixpt_from_int(
- dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(link)));
struct fixed31_32 avg_time_slots_per_mtp =
dc_fixpt_div(timing_bw, timeslot_bw_effective);
@@ -1973,6 +1975,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2012,6 +2015,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;
+ /* We need to enable the stream encoder for TMDS first to apply the 1/4 TMDS
+ * character clock in case it is beyond 340MHz.
+ */
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
dc->hwss.enable_tmds_link_output(
link,
&pipe_ctx->link_res,
@@ -2131,7 +2140,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- set_default_brightness_aux(link); // TODO: use cached if known
+ set_cached_brightness_aux(link);
+
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
edp_backlight_enable_aux(link, true);
@@ -2209,9 +2219,8 @@ static enum dc_status enable_link(
* link settings. Need to call disable first before enabling at
* new link settings.
*/
- if (link->link_status.link_active) {
+ if (link->link_status.link_active && !stream->skip_edp_power_down)
disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- }
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
@@ -2271,8 +2280,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
}
}
- if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
- dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) {
@@ -2330,7 +2338,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_stream(pipe_ctx);
} else {
dc->hwss.disable_stream(pipe_ctx);
- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ if (!pipe_ctx->stream->skip_edp_power_down) {
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
}
if (pipe_ctx->stream->timing.flags.DSC) {
@@ -2358,6 +2368,8 @@ void link_set_dpms_on(
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ bool apply_edp_fast_boot_optimization =
+ pipe_ctx->stream->apply_edp_fast_boot_optimization;
ASSERT(is_master_pipe_for_link(link, pipe_ctx));
@@ -2375,8 +2387,7 @@ void link_set_dpms_on(
}
}
- if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
- dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
link_enc = link_enc_cfg_get_link_enc(link);
@@ -2402,138 +2413,126 @@ void link_set_dpms_on(
link_hwss->setup_stream_attribute(pipe_ctx);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- bool apply_edp_fast_boot_optimization =
- pipe_ctx->stream->apply_edp_fast_boot_optimization;
-
- pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
-
- // Enable VPG before building infoframe
- if (vpg && vpg->funcs->vpg_poweron)
- vpg->funcs->vpg_poweron(vpg);
+ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
- resource_build_info_frame(pipe_ctx);
- dc->hwss.update_info_frame(pipe_ctx);
+ // Enable VPG before building infoframe
+ if (vpg && vpg->funcs->vpg_poweron)
+ vpg->funcs->vpg_poweron(vpg);
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
-
- /* Do not touch link on seamless boot optimization. */
- if (pipe_ctx->stream->apply_seamless_boot_optimization) {
- pipe_ctx->stream->dpms_off = false;
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
- /* Still enable stream features & audio on seamless boot for DP external displays */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
- enable_stream_features(pipe_ctx);
- dc->hwss.enable_audio_stream(pipe_ctx);
- }
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
- update_psp_stream_config(pipe_ctx, false);
- return;
- }
+ /* Do not touch link on seamless boot optimization. */
+ if (pipe_ctx->stream->apply_seamless_boot_optimization) {
+ pipe_ctx->stream->dpms_off = false;
- /* eDP lit up by bios already, no need to enable again. */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- apply_edp_fast_boot_optimization &&
- !pipe_ctx->stream->timing.flags.DSC &&
- !pipe_ctx->next_odm_pipe) {
- pipe_ctx->stream->dpms_off = false;
- update_psp_stream_config(pipe_ctx, false);
- return;
+ /* Still enable stream features & audio on seamless boot for DP external displays */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ enable_stream_features(pipe_ctx);
+ dc->hwss.enable_audio_stream(pipe_ctx);
}
- if (pipe_ctx->stream->dpms_off)
- return;
+ update_psp_stream_config(pipe_ctx, false);
+ return;
+ }
- /* Have to setup DSC before DIG FE and BE are connected (which happens before the
- * link training). This is to make sure the bandwidth sent to DIG BE won't be
- * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
- * will be automatically set at a later time when the video is enabled
- * (DP_VID_STREAM_EN = 1).
- */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- link_set_dsc_enable(pipe_ctx, true);
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ apply_edp_fast_boot_optimization &&
+ !pipe_ctx->stream->timing.flags.DSC &&
+ !pipe_ctx->next_odm_pipe) {
+ pipe_ctx->stream->dpms_off = false;
+ update_psp_stream_config(pipe_ctx, false);
+ return;
+ }
- }
+ if (pipe_ctx->stream->dpms_off)
+ return;
- status = enable_link(state, pipe_ctx);
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, true);
- if (status != DC_OK) {
- DC_LOG_WARNING("enabling link %u failed: %d\n",
- pipe_ctx->stream->link->link_index,
- status);
+ }
- /* Abort stream enable *unless* the failure was due to
- * DP link training - some DP monitors will recover and
- * show the stream anyway. But MST displays can't proceed
- * without link training.
- */
- if (status != DC_FAIL_DP_LINK_TRAINING ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- if (false == stream->link->link_status.link_active)
- disable_link(stream->link, &pipe_ctx->link_res,
- pipe_ctx->stream->signal);
- BREAK_TO_DEBUGGER();
- return;
- }
- }
+ status = enable_link(state, pipe_ctx);
- /* turn off otg test pattern if enable */
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
+ if (status != DC_OK) {
+ DC_LOG_WARNING("enabling link %u failed: %d\n",
+ pipe_ctx->stream->link->link_index,
+ status);
- /* This second call is needed to reconfigure the DIG
- * as a workaround for the incorrect value being applied
- * from transmitter control.
+ /* Abort stream enable *unless* the failure was due to
+ * DP link training - some DP monitors will recover and
+ * show the stream anyway. But MST displays can't proceed
+ * without link training.
*/
- if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
- dp_is_128b_132b_signal(pipe_ctx))) {
- if (link_enc)
- link_enc->funcs->setup(
- link_enc,
+ if (status != DC_FAIL_DP_LINK_TRAINING ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (false == stream->link->link_status.link_active)
+ disable_link(stream->link, &pipe_ctx->link_res,
pipe_ctx->stream->signal);
- }
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ }
- dc->hwss.enable_stream(pipe_ctx);
+ /* turn off otg test pattern if enable */
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
- /* Set DPS PPS SDP (AKA "info frames") */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal)) {
- dp_set_dsc_on_rx(pipe_ctx, true);
- link_set_dsc_pps_packet(pipe_ctx, true, true);
- }
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+ dp_is_128b_132b_signal(pipe_ctx))) {
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
}
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- dp_is_128b_132b_signal(pipe_ctx))
- update_sst_payload(pipe_ctx, true);
+ dc->hwss.enable_stream(pipe_ctx);
- dc->hwss.unblank_stream(pipe_ctx,
- &pipe_ctx->stream->link->cur_link_settings);
+ /* Set DPS PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_on_rx(pipe_ctx, true);
+ link_set_dsc_pps_packet(pipe_ctx, true, true);
+ }
+ }
- if (stream->sink_patches.delay_ignore_msa > 0)
- msleep(stream->sink_patches.delay_ignore_msa);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ dp_is_128b_132b_signal(pipe_ctx))
+ update_sst_payload(pipe_ctx, true);
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- enable_stream_features(pipe_ctx);
- update_psp_stream_config(pipe_ctx, false);
+ dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->link->cur_link_settings);
- dc->hwss.enable_audio_stream(pipe_ctx);
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
- } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (dp_is_128b_132b_signal(pipe_ctx))
- dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx);
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- link_set_dsc_enable(pipe_ctx, true);
- }
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ enable_stream_features(pipe_ctx);
+ update_psp_stream_config(pipe_ctx, false);
+
+ dc->hwss.enable_audio_stream(pipe_ctx);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
set_avmute(pipe_ctx, false);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 1515c817f03b..195ca9e52eda 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -132,6 +132,7 @@ static void construct_link_service_ddc(struct link_service *link_srv)
link_srv->destroy_ddc_service = link_destroy_ddc_service;
link_srv->query_ddc_data = link_query_ddc_data;
link_srv->aux_transfer_raw = link_aux_transfer_raw;
+ link_srv->configure_fixed_vs_pe_retimer = link_configure_fixed_vs_pe_retimer;
link_srv->aux_transfer_with_retries_no_mutex =
link_aux_transfer_with_retries_no_mutex;
link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode;
@@ -207,6 +208,13 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
link_srv->edp_set_sink_vtotal_in_psr_active =
edp_set_sink_vtotal_in_psr_active;
link_srv->edp_get_psr_residency = edp_get_psr_residency;
+
+ link_srv->edp_get_replay_state = edp_get_replay_state;
+ link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active;
+ link_srv->edp_setup_replay = edp_setup_replay;
+ link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal;
+ link_srv->edp_replay_residency = edp_replay_residency;
+
link_srv->edp_wait_for_t12 = edp_wait_for_t12;
link_srv->edp_is_ilr_optimization_required =
edp_is_ilr_optimization_required;
@@ -563,11 +571,9 @@ static bool construct_phy(struct dc_link *link,
goto create_fail;
}
- /* TODO: #DAL3 Implement id to str function.*/
- LINK_INFO("Connector[%d] description:"
- "signal %d\n",
+ LINK_INFO("Connector[%d] description: signal: %s\n",
init_params->connector_index,
- link->connector_signal);
+ signal_type_to_string(link->connector_signal));
ddc_service_init_data.ctx = link->ctx;
ddc_service_init_data.id = link->link_id;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index d4b7da526f0a..b45fda96eaf6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -130,7 +130,8 @@ static bool dp_active_dongle_validate_timing(
/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
outputTiming.flags.DSC = 0;
#endif
- if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
+ dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
return false;
} else { // DP to HDMI TMDS converter
if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
@@ -285,7 +286,7 @@ static bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link));
max_bw = dp_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
@@ -357,7 +358,11 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
for (uint8_t i = 0; i < num_streams; ++i) {
link[i] = stream[i].link;
- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
+ bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(link[i]));
}
+
+ ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 0fa1228bc178..ecfd83299e75 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -412,6 +412,88 @@ int link_aux_transfer_raw(struct ddc_service *ddc,
}
}
+uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link)
+{
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint8_t offset;
+
+ switch (link->dpcd_caps.lttpr_caps.phy_repeater_cnt) {
+ case 0x80: // 1 lttpr repeater
+ offset = 1;
+ break;
+ case 0x40: // 2 lttpr repeaters
+ offset = 2;
+ break;
+ case 0x20: // 3 lttpr repeaters
+ offset = 3;
+ break;
+ case 0x10: // 4 lttpr repeaters
+ offset = 4;
+ break;
+ case 0x08: // 5 lttpr repeaters
+ offset = 5;
+ break;
+ case 0x04: // 6 lttpr repeaters
+ offset = 6;
+ break;
+ case 0x02: // 7 lttpr repeaters
+ offset = 7;
+ break;
+ case 0x01: // 8 lttpr repeaters
+ offset = 8;
+ break;
+ default:
+ offset = 0xFF;
+ }
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+ return vendor_lttpr_write_address;
+}
+
+uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link)
+{
+ return link_get_fixed_vs_pe_retimer_write_address(link) + 4;
+}
+
+bool link_configure_fixed_vs_pe_retimer(struct ddc_service *ddc, const uint8_t *data, uint32_t length)
+{
+ struct aux_payload write_payload = {
+ .i2c_over_aux = false,
+ .write = true,
+ .address = link_get_fixed_vs_pe_retimer_write_address(ddc->link),
+ .length = length,
+ .data = (uint8_t *) data,
+ .reply = NULL,
+ .mot = I2C_MOT_UNDEF,
+ .write_status_update = false,
+ .defer_delay = 0,
+ };
+
+ return link_aux_transfer_with_retries_no_mutex(ddc,
+ &write_payload);
+}
+
+bool link_query_fixed_vs_pe_retimer(struct ddc_service *ddc, uint8_t *data, uint32_t length)
+{
+ struct aux_payload read_payload = {
+ .i2c_over_aux = false,
+ .write = false,
+ .address = link_get_fixed_vs_pe_retimer_read_address(ddc->link),
+ .length = length,
+ .data = data,
+ .reply = NULL,
+ .mot = I2C_MOT_UNDEF,
+ .write_status_update = false,
+ .defer_delay = 0,
+ };
+
+ return link_aux_transfer_with_retries_no_mutex(ddc,
+ &read_payload);
+}
+
bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
struct aux_payload *payload)
{
@@ -427,7 +509,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
- ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
const uint32_t fixed_vs_address = 0xF004F;
const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
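(Editorial worked example, not part of the diff.) link_get_fixed_vs_pe_retimer_write_address() above starts from DPCD address 0xF004F and steps by one repeater configuration block per LTTPR. Assuming DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE is 0x50, as in the DP standard, a link reporting two repeaters (phy_repeater_cnt == 0x40) gives offset = 2, so:

	write_address = 0xF004F + 0x50 * (2 - 1);   /* = 0xF009F */
	read_address  = write_address + 4;          /* = 0xF00A3 */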
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index 860ef15d7f1b..a3e25e55bed6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -72,6 +72,20 @@ bool link_query_ddc_data(
bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
struct aux_payload *payload);
+bool link_configure_fixed_vs_pe_retimer(
+ struct ddc_service *ddc,
+ const uint8_t *data,
+ uint32_t length);
+
+bool link_query_fixed_vs_pe_retimer(
+ struct ddc_service *ddc,
+ uint8_t *data,
+ uint32_t length);
+
+uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link);
+uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link);
+
+
void write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index ba98013fecd0..237e0ff955f3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -326,8 +326,7 @@ bool dp_is_fec_supported(const struct dc_link *link)
return (dc_is_dp_signal(link->connector_signal) && link_enc &&
link_enc->features.fec_supported &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
- !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE);
}
bool dp_should_enable_fec(const struct dc_link *link)
@@ -907,7 +906,7 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
struct dc_link_settings *link_setting)
{
struct dc_link *link = stream->link;
- uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link));
memset(link_setting, 0, sizeof(*link_setting));
@@ -940,7 +939,8 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
tmp_timing.flags.DSC = 0;
- orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing,
+ dc_link_get_highest_encoding_format(link));
edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw);
max_link_rate = tmp_link_setting.link_rate;
}
@@ -1043,9 +1043,7 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
DP_SET_POWER,
&dpcd_power_state,
sizeof(dpcd_power_state));
- if (status < 0)
- DC_LOG_DC("%s: Failed to power up sink: %s\n", __func__,
- dpcd_power_state == DP_SET_POWER_D0 ? "D0" : "D3");
+ DC_LOG_DC("%s: Failed to power up sink\n", __func__);
return DC_ERROR_UNEXPECTED;
}
@@ -1396,7 +1394,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
- if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
+ if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
@@ -1452,7 +1450,8 @@ bool read_is_mst_supported(struct dc_link *link)
*/
static bool dpcd_read_sink_ext_caps(struct dc_link *link)
{
- uint8_t dpcd_data;
+ uint8_t dpcd_data = 0;
+ uint8_t edp_general_cap2 = 0;
if (!link)
return false;
@@ -1461,6 +1460,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
+
+ if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
+ return false;
+
+ link->dpcd_caps.panel_luminance_control = (edp_general_cap2 & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) != 0;
+
return true;
}
@@ -1554,6 +1559,9 @@ static bool retrieve_link_cap(struct dc_link *link)
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
+ bool is_fec_supported = false;
+ bool is_dsc_basic_supported = false;
+ bool is_dsc_passthrough_supported = false;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -1696,6 +1704,7 @@ static bool retrieve_link_cap(struct dc_link *link)
/* TODO - decouple raw mst capability from policy decision */
link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
+ DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable));
get_active_converter_info(ds_port.byte, link);
@@ -1803,6 +1812,17 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_SUPPORT,
link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+ if (status == DC_OK) {
+ is_fec_supported = link->dpcd_caps.fec_cap.bits.FEC_CAPABLE;
+ is_dsc_basic_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT;
+ is_dsc_passthrough_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT;
+ DC_LOG_DC("%s: FEC_Sink_Support: %s\n", __func__,
+ str_yes_no(is_fec_supported));
+ DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__,
+ str_yes_no(is_dsc_basic_supported));
+ DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__,
+ str_yes_no(is_dsc_passthrough_supported));
+ }
if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
status = core_link_read_dpcd(
link,
@@ -1931,6 +1951,9 @@ void detect_edp_sink_caps(struct dc_link *link)
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
+ DC_LOG_DC("%s: eDP v1.4 supported sink rates: [%d] %d kHz\n", __func__,
+ entry / 2, link_rate_in_khz);
+
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
@@ -1986,6 +2009,16 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP,
&link->dpcd_caps.alpm_caps.raw,
sizeof(link->dpcd_caps.alpm_caps.raw));
+
+ /*
+ * Read REPLAY info
+ */
+ core_link_read_dpcd(link, DP_SINK_PR_PIXEL_DEVIATION_PER_LINE,
+ &link->dpcd_caps.pr_info.pixel_deviation_per_line,
+ sizeof(link->dpcd_caps.pr_info.pixel_deviation_per_line));
+ core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE,
+ &link->dpcd_caps.pr_info.max_deviation_line,
+ sizeof(link->dpcd_caps.pr_info.max_deviation_line));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2143,7 +2176,9 @@ static bool dp_verify_link_cap(
link,
&irq_data))
(*fail_count)++;
-
+ } else if (status == LINK_TRAINING_LINK_LOSS) {
+ success = true;
+ (*fail_count)++;
} else {
(*fail_count)++;
}
@@ -2166,6 +2201,7 @@ bool dp_verify_link_cap_with_retries(
int i = 0;
bool success = false;
int fail_count = 0;
+ struct dc_link_settings last_verified_link_cap = fail_safe_link_settings;
dp_trace_detect_lt_init(link);
@@ -2182,10 +2218,14 @@ bool dp_verify_link_cap_with_retries(
if (!link_detect_connection_type(link, &type) || type == dc_connection_none) {
link->verified_link_cap = fail_safe_link_settings;
break;
- } else if (dp_verify_link_cap(link, known_limit_link_setting,
- &fail_count) && fail_count == 0) {
- success = true;
- break;
+ } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) {
+ last_verified_link_cap = link->verified_link_cap;
+ if (fail_count == 0) {
+ success = true;
+ break;
+ }
+ } else {
+ link->verified_link_cap = last_verified_link_cap;
}
fsleep(10 * 1000);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 4626fabc0a96..0bb749133909 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -90,7 +90,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
/* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
is_hpd_high = cmd.query_hpd.data.result;
DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index ba95facc4ee8..e047bbeaa49a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -82,8 +82,15 @@ bool dp_parse_link_loss_status(
}
/* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+ if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
+ (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b)) {
+ sink_status_changed = true;
+ } else if (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+ sink_status_changed = true;
+ }
+
+ if (sink_status_changed) {
DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
@@ -175,6 +182,68 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
return false;
}
+static bool handle_hpd_irq_replay_sink(struct dc_link *link)
+{
+ union dpcd_replay_configuration replay_configuration;
+ /* AMD Replay version reuses DP_PSR_ERROR_STATUS for REPLAY_ERROR status. */
+ union psr_error_status replay_error_status;
+
+ if (!link->replay_settings.replay_feature_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_SINK_PR_REPLAY_STATUS,
+ &replay_configuration.raw,
+ sizeof(replay_configuration.raw));
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_ERROR_STATUS,
+ &replay_error_status.raw,
+ sizeof(replay_error_status.raw));
+
+ link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
+ replay_error_status.bits.LINK_CRC_ERROR;
+ link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
+ replay_configuration.bits.DESYNC_ERROR_STATUS;
+ link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR =
+ replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS;
+
+ if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR ||
+ link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR ||
+ link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
+ bool allow_active;
+
+ /* Acknowledge and clear configuration bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_SINK_PR_REPLAY_STATUS,
+ &replay_configuration.raw,
+ sizeof(replay_configuration.raw));
+
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_ERROR_STATUS,/*DpcdAddress_REPLAY_Error_Status*/
+ &replay_error_status.raw,
+ sizeof(replay_error_status.raw));
+
+ /* Replay error, disable and re-enable Replay */
+ if (link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ }
+ }
+ return true;
+}
+
void dp_handle_link_loss(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
@@ -201,6 +270,25 @@ void dp_handle_link_loss(struct dc_link *link)
}
}
+static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data)
+{
+ enum dc_status retval;
+ union lane_align_status_updated dpcd_lane_status_updated;
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_LANE_ALIGN_STATUS_UPDATED,
+ &dpcd_lane_status_updated.raw,
+ sizeof(union lane_align_status_updated));
+
+ if (retval == DC_OK) {
+ irq_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b =
+ dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b;
+ irq_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b =
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b;
+ }
+}
+
enum dc_status dp_read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
@@ -242,6 +330,13 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+
+ /*
+ * This display doesn't have correct values in DPCD200Eh.
+ * Read and check DPCD204h instead.
+ */
+ if (link->wa_flags.read_dpcd204h_on_irq_hpd)
+ read_dpcd204h_on_irq_hpd(link, irq_data);
}
return retval;
@@ -327,6 +422,10 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
/* PSR-related error was detected and handled */
return true;
+ if (handle_hpd_irq_replay_sink(link))
+ /* Replay-related error was detected and handled */
+ return true;
+
/* If PSR-related error handled, Main link may be off,
* so do not handle as a normal sink status change interrupt.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 579fa222810d..90339c2dfd84 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1653,10 +1653,19 @@ bool perform_link_training_with_retries(
break;
}
- DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
- __func__, link->link_index, (unsigned int)j + 1, attempts,
- cur_link_settings.link_rate, cur_link_settings.lane_count,
- cur_link_settings.link_spread, status);
+ if (j == (attempts - 1)) {
+ DC_LOG_WARNING(
+ "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts,
+ cur_link_settings.link_rate, cur_link_settings.lane_count,
+ cur_link_settings.link_spread, status);
+ } else {
+ DC_LOG_HW_LINK_TRAINING(
+ "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts,
+ cur_link_settings.link_rate, cur_link_settings.lane_count,
+ cur_link_settings.link_spread, status);
+ }
dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
@@ -1690,13 +1699,20 @@ bool perform_link_training_with_retries(
} else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
uint32_t req_bw;
uint32_t link_bw;
+ enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED;
decide_fallback_link_setting(link, &max_link_settings,
&cur_link_settings, status);
+
+ if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+ link_encoding = DC_LINK_ENCODING_DP_8b_10b;
+ else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING)
+ link_encoding = DC_LINK_ENCODING_DP_128b_132b;
+
/* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
* minimum link bandwidth.
*/
- req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding);
link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings);
is_link_bw_low = (req_bw > link_bw);
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
index 23d380f09a21..db87cfe37b5c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -211,11 +211,17 @@ enum link_training_result dp_perform_128b_132b_link_training(
dpcd_set_link_settings(link, lt_settings);
- if (result == LINK_TRAINING_SUCCESS)
+ if (result == LINK_TRAINING_SUCCESS) {
result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
+ if (result == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
+ }
- if (result == LINK_TRAINING_SUCCESS)
+ if (result == LINK_TRAINING_SUCCESS) {
result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
+ if (result == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: CDS done.\n", __func__);
+ }
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 3889ebb2256b..2b4c15b0b407 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -388,6 +388,8 @@ enum link_training_result dp_perform_8b_10b_link_training(
link_res,
lt_settings,
repeater_id);
+ if (status == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
repeater_training_done(link, repeater_id);
@@ -409,6 +411,8 @@ enum link_training_result dp_perform_8b_10b_link_training(
link_res,
lt_settings,
DPRX);
+ if (status == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 5731c4b61f9f..fd8f6f198146 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -36,6 +36,7 @@
#include "link_dpcd.h"
#include "link_dp_phy.h"
#include "link_dp_capability.h"
+#include "link_ddc.h"
#define DC_LOGGER \
link->ctx->logger
@@ -46,42 +47,20 @@ void dp_fixed_vs_pe_read_lane_adjust(
{
const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint32_t vendor_lttpr_read_address = 0xF0053;
uint8_t dprx_vs = 0;
uint8_t dprx_pe = 0;
uint8_t lane;
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- vendor_lttpr_read_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
/* W/A to read lane settings requested by DPRX */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_vs,
- 1);
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_pe,
- 1);
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+
+ link_query_fixed_vs_pe_retimer(link->ddc, &dprx_vs, 1);
+
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
+
+ link_query_fixed_vs_pe_retimer(link->ddc, &dprx_pe, 1);
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
@@ -95,19 +74,11 @@ void dp_fixed_vs_pe_set_retimer_lane_settings(
const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
uint8_t lane_count)
{
- const uint8_t offset = dp_parse_lttpr_repeater_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- uint32_t vendor_lttpr_write_address = 0xF004F;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
uint8_t lane = 0;
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
for (lane = 0; lane < lane_count; lane++) {
vendor_lttpr_write_data_vs[3] |=
dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
@@ -116,21 +87,14 @@ void dp_fixed_vs_pe_set_retimer_lane_settings(
}
/* Force LTTPR to output desired VS and PE */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
+
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
}
static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
@@ -233,10 +197,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint32_t vendor_lttpr_write_address = 0xF004F;
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -254,37 +222,27 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
}
if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ if (offset == 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
/* Certain display and cable configuration require extra delay */
- if (offset > 2)
+ } else if (offset > 2) {
pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
}
/* Vendor specific: Reset lane settings */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* Vendor specific: Enable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
+
/* 1. set link rate, lane count and spread. */
@@ -335,6 +293,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
+ }
+
/* 2. Perform link training */
/* Perform Clock Recovery Sequence */
@@ -347,7 +318,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- enum dc_status dpcd_status = DC_OK;
uint8_t i = 0;
retries_cr = 0;
@@ -380,19 +350,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
0);
/* Vendor specific: Disable intercept */
for (i = 0; i < max_vendor_dpcd_retries; i++) {
- msleep(pre_disable_intercept_delay_ms);
- dpcd_status = core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ if (pre_disable_intercept_delay_ms != 0)
+ msleep(pre_disable_intercept_delay_ms);
+ if (link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
-
- if (dpcd_status == DC_OK)
+ sizeof(vendor_lttpr_write_data_intercept_dis)))
break;
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_en[0],
sizeof(vendor_lttpr_write_data_intercept_en));
}
@@ -408,16 +373,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
dpcd_set_lane_settings(
link,
@@ -513,16 +472,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* 2. update DPCD*/
if (!retries_ch_eq)
@@ -591,11 +544,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E};
const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01};
const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
-
- uint32_t vendor_lttpr_write_address = 0xF004F;
+ const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+ const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+ const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+ const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+ const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -613,37 +569,26 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
}
if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ if (offset == 2) {
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
/* Certain display and cable configuration require extra delay */
- if (offset > 2)
+ } else if (offset > 2) {
pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
}
/* Vendor specific: Reset lane settings */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* Vendor specific: Enable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
/* 1. set link rate, lane count and spread. */
@@ -694,6 +639,19 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
+ }
+
/* 2. Perform link training */
/* Perform Clock Recovery Sequence */
@@ -706,7 +664,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- enum dc_status dpcd_status = DC_OK;
uint8_t i = 0;
retries_cr = 0;
@@ -739,19 +696,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
0);
/* Vendor specific: Disable intercept */
for (i = 0; i < max_vendor_dpcd_retries; i++) {
- msleep(pre_disable_intercept_delay_ms);
- dpcd_status = core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ if (pre_disable_intercept_delay_ms != 0)
+ msleep(pre_disable_intercept_delay_ms);
+ if (link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
-
- if (dpcd_status == DC_OK)
+ sizeof(vendor_lttpr_write_data_intercept_dis)))
break;
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_intercept_en[0],
sizeof(vendor_lttpr_write_data_intercept_en));
}
@@ -767,16 +719,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
dpcd_set_lane_settings(
link,
@@ -849,17 +795,14 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_adicora_eq1[0],
sizeof(vendor_lttpr_write_data_adicora_eq1));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
+ link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_adicora_eq2[0],
sizeof(vendor_lttpr_write_data_adicora_eq2));
+
/* Note: also check that TPS4 is a supported feature*/
tr_pattern = lt_settings->pattern_for_eq;
@@ -883,16 +826,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
}
/* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
/* 2. update DPCD*/
if (!retries_ch_eq) {
@@ -905,11 +842,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
lt_settings,
tr_pattern, 0);
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_adicora_eq3[0],
- sizeof(vendor_lttpr_write_data_adicora_eq3));
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_adicora_eq3[0],
+ sizeof(vendor_lttpr_write_data_adicora_eq3));
+
} else
dpcd_set_lane_settings(link, lt_settings, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 8d1df863659c..98e715aa6d8e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -34,9 +34,13 @@
#include "dm_helpers.h"
#include "dal_asic_id.h"
#include "dce/dmub_psr.h"
+#include "dc/dc_dmub_srv.h"
+#include "dce/dmub_replay.h"
#include "abm.h"
#define DC_LOGGER_INIT(logger)
+#define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B
+
/* Travis */
static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
/* Nutmeg */
@@ -46,43 +50,42 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;
+ enum dc_status result;
memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
- if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
- switch (panel_mode) {
- case DP_PANEL_MODE_EDP:
- case DP_PANEL_MODE_SPECIAL:
- panel_mode_edp = true;
- break;
+ default:
+ break;
+ }
- default:
- break;
- }
+ /*set edp panel mode in receiver*/
+ result = core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (result == DC_OK &&
+ edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
- /*set edp panel mode in receiver*/
- core_link_read_dpcd(
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
link,
DP_EDP_CONFIGURATION_SET,
&edp_config_set.raw,
sizeof(edp_config_set.raw));
- if (edp_config_set.bits.PANEL_MODE_EDP
- != panel_mode_edp) {
- enum dc_status result;
-
- edp_config_set.bits.PANEL_MODE_EDP =
- panel_mode_edp;
- result = core_link_write_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- ASSERT(result == DC_OK);
- }
+ ASSERT(result == DC_OK);
}
+
link->panel_mode = panel_mode;
DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
"eDP panel mode enabled: %d \n",
@@ -164,15 +167,37 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+ link->backlight_settings.backlight_millinits = backlight_millinits;
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
- return false;
+ return false;
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
&backlight_control, 1) != DC_OK)
- return false;
+ return false;
+ } else {
+ const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+ struct target_luminance_value *target_luminance = NULL;
+
+ //if target luminance value is greater than 24 bits, clip the value to 24 bits
+ if (backlight_millinits > 0xFFFFFF)
+ backlight_millinits = 0xFFFFFF;
+
+ target_luminance = (struct target_luminance_value *)&backlight_millinits;
+
+ if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable,
+ sizeof(backlight_enable)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ (uint8_t *)(target_luminance),
+ sizeof(struct target_luminance_value)) != DC_OK)
+ return false;
+ }
return true;
}
@@ -230,10 +255,20 @@ static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millin
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *) backlight_millinits,
- sizeof(uint32_t)))
- return false;
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+ } else {
+ //setting to 0 as a precaution, since target_luminance_value is 3 bytes
+ memset(backlight_millinits, 0, sizeof(uint32_t));
+
+ if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ (uint8_t *)backlight_millinits,
+ sizeof(struct target_luminance_value)))
+ return false;
+ }
return true;
}
@@ -255,6 +290,16 @@ bool set_default_brightness_aux(struct dc_link *link)
return false;
}
+bool set_cached_brightness_aux(struct dc_link *link)
+{
+ if (link->backlight_settings.backlight_millinits)
+ return edp_set_backlight_level_nits(link, true,
+ link->backlight_settings.backlight_millinits, 0);
+ else
+ return set_default_brightness_aux(link);
+ return false;
+}
+
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing)
{
@@ -288,7 +333,7 @@ bool edp_is_ilr_optimization_required(struct dc_link *link,
core_link_read_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, sizeof(lane_count_set));
- req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link));
if (!crtc_timing->flags.DSC)
edp_decide_link_settings(link, &link_setting, req_bw);
@@ -786,6 +831,167 @@ bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_
return true;
}
+bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (replay == NULL && force_static)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ /* Set power optimization flag */
+ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
+ if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
+ link->replay_settings.replay_power_opt_active = *power_opts;
+ }
+ }
+
+ /* Activate or deactivate Replay */
+ if (allow_active && link->replay_settings.replay_allow_active != *allow_active) {
+ // TODO: Handle mux change case if force_static is set
+ // If force_static is set, just change the replay_allow_active state directly
+ if (replay != NULL && link->replay_settings.replay_feature_enabled)
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
+ link->replay_settings.replay_allow_active = *allow_active;
+ }
+
+ return true;
+}
+
+bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+ enum replay_state pr_state = REPLAY_STATE_0;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if (replay != NULL && link->replay_settings.replay_feature_enabled)
+ replay->funcs->replay_get_state(replay, &pr_state, panel_inst);
+ *state = pr_state;
+
+ return true;
+}
+
+bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ /* To-do: Setup Replay */
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+ unsigned int lineTimeInNs = 0;
+
+
+ union replay_enable_and_configuration replay_config;
+
+ union dpcd_alpm_configuration alpm_config;
+
+ replay_context.controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ if (!replay)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
+ replay_context.digbe_inst = link->link_enc->transmitter;
+ replay_context.digfe_inst = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu -1 for all controller id values,
+ * therefore +1 here
+ */
+ replay_context.controllerId =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ lineTimeInNs =
+ ((stream->timing.h_total * 1000000) /
+ (stream->timing.pix_clk_100hz / 10)) + 1;
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+ if (replay)
+ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+ if (link->replay_settings.replay_feature_enabled) {
+
+ replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1;
+ replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION =
+ link->replay_settings.config.replay_timing_sync_supported;
+ replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1;
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_SINK_PR_ENABLE_AND_CONFIGURATION,
+ (uint8_t *)&(replay_config.raw), sizeof(uint8_t));
+
+ memset(&alpm_config, 0, sizeof(alpm_config));
+ alpm_config.bits.ENABLE = 1;
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_RECEIVER_ALPM_CONFIG,
+ &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+ return true;
+}
+
+bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!replay)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst);
+ link->replay_settings.coasting_vtotal = coasting_vtotal;
+ }
+
+ return true;
+}
+
+bool edp_replay_residency(const struct dc_link *link,
+ unsigned int *residency, const bool is_start, const bool is_alpm)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if (replay != NULL && link->replay_settings.replay_feature_enabled)
+ replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm);
+ else
+ *residency = 0;
+
+ return true;
+}
+
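A condensed sketch of how a driver-side caller might drive the new Replay helpers added above; the ordering mirrors handle_hpd_irq_replay_sink's recovery path earlier in this patch, but the exact call site and the wait/force_static choices below are assumptions, not part of this change:

	bool allow = true;

	/* one-time setup when the eDP stream is enabled */
	if (edp_setup_replay(link, stream))
		/* then gate Replay entry/exit around activity */
		edp_set_replay_allow_active(link, &allow, false, false, NULL);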
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 28f552080558..0a5bbda8c739 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,6 +30,7 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
+bool set_cached_brightness_aux(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
@@ -52,6 +53,14 @@ bool edp_setup_psr(struct dc_link *link,
bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link,
uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+bool edp_setup_replay(struct dc_link *link,
+ const struct dc_stream_state *stream);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_replay_residency(const struct dc_link *link,
+ unsigned int *residency, const bool is_start, const bool is_alpm);
+bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index ba1715e2d25a..2d995c87fbb9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -271,7 +271,7 @@ struct dmub_srv_hw_params {
*/
struct dmub_diagnostic_data {
uint32_t dmcub_version;
- uint32_t scratch[16];
+ uint32_t scratch[17];
uint32_t pc;
uint32_t undefined_address_fault_addr;
uint32_t inst_fetch_fault_addr;
@@ -282,6 +282,7 @@ struct dmub_diagnostic_data {
uint32_t inbox0_rptr;
uint32_t inbox0_wptr;
uint32_t inbox0_size;
+ uint32_t gpint_datain0;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
@@ -340,6 +341,8 @@ struct dmub_srv_hw_funcs {
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+ uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub);
+
uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
@@ -364,9 +367,10 @@ struct dmub_srv_hw_funcs {
bool (*is_supported)(struct dmub_srv *dmub);
+ bool (*is_psrsu_supported)(struct dmub_srv *dmub);
+
bool (*is_hw_init)(struct dmub_srv *dmub);
- bool (*is_phy_init)(struct dmub_srv *dmub);
void (*enable_dmub_boot_options)(struct dmub_srv *dmub,
const struct dmub_srv_hw_params *params);
@@ -374,6 +378,7 @@ struct dmub_srv_hw_funcs {
union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub);
+ union dmub_fw_boot_options (*get_fw_boot_option)(struct dmub_srv *dmub);
void (*set_gpint)(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);
@@ -490,7 +495,7 @@ struct dmub_notification {
* of a firmware to know if feature or functionality is supported or present.
*/
#define DMUB_FW_VERSION(major, minor, revision) \
- ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF))
+ ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))
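With the repacked layout each component occupies one byte: major in bits 31:24, minor in bits 23:16, revision in bits 15:8, with bits 7:0 left clear. A small sketch of a numeric feature gate built on the macro; the version numbers are arbitrary and fw_version is assumed to hold the value reported by the firmware:

	/* DMUB_FW_VERSION(4, 0, 59) == 0x04003B00 under the new packing */
	if (dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59))
		enable_optional_feature = true;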
/**
* dmub_srv_create() - creates the DMUB service.
@@ -602,6 +607,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
+ * dmub_srv_sync_inbox1() - sync sw state with hw state
+ * @dmub: the dmub service
+ *
+ * Sync sw state with hw state when resume from S0i3
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
+
+/**
* dmub_srv_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
@@ -762,9 +779,15 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb);
enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
union dmub_fw_boot_status *status);
+enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
+ union dmub_fw_boot_options *option);
+
enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
+enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
+ bool skip);
+
bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry);
bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 598fa1de54ce..7afa78b918b5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -170,6 +170,95 @@ extern "C" {
#endif
#pragma pack(push, 1)
+#define ABM_NUM_OF_ACE_SEGMENTS 5
+
+union abm_flags {
+ struct {
+ /**
+ * @abm_enabled: Indicates if ABM is enabled.
+ */
+ unsigned int abm_enabled : 1;
+
+ /**
+ * @disable_abm_requested: Indicates if driver has requested ABM to be disabled.
+ */
+ unsigned int disable_abm_requested : 1;
+
+ /**
+ * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled
+ * immediately.
+ */
+ unsigned int disable_abm_immediately : 1;
+
+ /**
+ * @disable_abm_immediate_keep_gain: Indicates if driver has requested ABM
+ * to be disabled immediately and keep gain.
+ */
+ unsigned int disable_abm_immediate_keep_gain : 1;
+
+ /**
+ * @fractional_pwm: Indicates if fractional duty cycle for backlight PWM is enabled.
+ */
+ unsigned int fractional_pwm : 1;
+
+ /**
+ * @abm_gradual_bl_change: Indicates if algorithm has completed gradual adjustment
+ * of user backlight level.
+ */
+ unsigned int abm_gradual_bl_change : 1;
+ } bitfields;
+
+ unsigned int u32All;
+};
+
+struct abm_save_restore {
+ /**
+ * @flags: Misc. ABM flags.
+ */
+ union abm_flags flags;
+
+ /**
+ * @pause: true: pause ABM and get state
+ * false: unpause ABM after setting state
+ */
+ uint32_t pause;
+
+ /**
+ * @next_ace_slope: Next ACE slopes to be programmed in HW (u3.13)
+ */
+ uint32_t next_ace_slope[ABM_NUM_OF_ACE_SEGMENTS];
+
+ /**
+ * @next_ace_thresh: Next ACE thresholds to be programmed in HW (u10.6)
+ */
+ uint32_t next_ace_thresh[ABM_NUM_OF_ACE_SEGMENTS];
+
+ /**
+ * @next_ace_offset: Next ACE offsets to be programmed in HW (u10.6)
+ */
+ uint32_t next_ace_offset[ABM_NUM_OF_ACE_SEGMENTS];
+
+
+ /**
+ * @knee_threshold: Current x-position of ACE knee (u0.16).
+ */
+ uint32_t knee_threshold;
+ /**
+ * @current_gain: Current backlight reduction (u16.16).
+ */
+ uint32_t current_gain;
+ /**
+ * @curr_bl_level: Current actual backlight level converging to target backlight level.
+ */
+ uint16_t curr_bl_level;
+
+ /**
+ * @curr_user_bl_level: Current nominal backlight level converging to level requested by user.
+ */
+ uint16_t curr_user_bl_level;
+
+};
+
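The @pause field above implies a two-phase handshake; a minimal sketch of that flow, assuming the command plumbing that transports this struct to and from DMUB (not shown in this hunk):

	struct abm_save_restore state = { 0 };

	state.pause = 1;	/* pause ABM and read its current state back */
	/* ...send to DMUB, cache ACE/gain/backlight fields from the reply... */
	state.pause = 0;	/* later: write the cached fields back and unpause */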
/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
@@ -249,6 +338,112 @@ union dmub_psr_debug_flags {
};
/**
+ * Flags that can be set by driver to change some Replay behaviour.
+ */
+union replay_debug_flags {
+ struct {
+ /**
+ * Enable visual confirm in FW.
+ */
+ uint32_t visual_confirm : 1;
+
+ /**
+ * @skip_crc: Set if need to skip CRC.
+ */
+ uint32_t skip_crc : 1;
+
+ /**
+ * @force_link_power_on: Force disable ALPM control
+ */
+ uint32_t force_link_power_on : 1;
+
+ /**
+ * @force_phy_power_on: Force phy power on
+ */
+ uint32_t force_phy_power_on : 1;
+
+ /**
+ * @timing_resync_disabled: Disabled Replay normal sleep mode timing resync
+ */
+ uint32_t timing_resync_disabled : 1;
+
+ /**
+ * @skip_crtc_disabled: CRTC disable skipped
+ */
+ uint32_t skip_crtc_disabled : 1;
+
+ /**
+ * @force_defer_one_frame_update: Force defer one frame update in ultra sleep mode
+ */
+ uint32_t force_defer_one_frame_update : 1;
+ /**
+ * @disable_delay_alpm_on: Force disable delay alpm on
+ */
+ uint32_t disable_delay_alpm_on : 1;
+ /**
+ * @disable_desync_error_check: Force disable desync error check
+ */
+ uint32_t disable_desync_error_check : 1;
+ /**
+ * @disable_dmub_save_restore: Force disable DMUB save/restore
+ */
+ uint32_t disable_dmub_save_restore : 1;
+
+ uint32_t reserved : 22;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+union replay_hw_flags {
+ struct {
+ /**
+ * @allow_alpm_fw_standby_mode: To indicate whether the
+ * ALPM FW standby mode is allowed
+ */
+ uint32_t allow_alpm_fw_standby_mode : 1;
+
+ /*
+ * @dsc_enable_status: DSC enable status in driver
+ */
+ uint32_t dsc_enable_status : 1;
+
+ /**
+ * @fec_enable_status: receive fec enable/disable status from driver
+ */
+ uint32_t fec_enable_status : 1;
+
+ /*
+ * @smu_optimizations_en: SMU power optimization.
+ * Only when active display is Replay capable and display enters Replay.
+ * Trigger interrupt to SMU to powerup/down.
+ */
+ uint32_t smu_optimizations_en : 1;
+
+ /**
+ * @otg_powered_down: Flag to keep track of OTG power state.
+ */
+ uint32_t otg_powered_down : 1;
+
+ /**
+ * @phy_power_state: Indicates current phy power state
+ */
+ uint32_t phy_power_state : 1;
+
+ /**
+ * @link_power_state: Indicates current link power state
+ */
+ uint32_t link_power_state : 1;
+ /**
+ * Use TPS3 signal when restore main link.
+ */
+ uint32_t force_wakeup_by_tps3 : 1;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+/**
* DMUB visual confirm color
*/
struct dmub_feature_caps {
@@ -257,7 +452,9 @@ struct dmub_feature_caps {
*/
uint8_t psr;
uint8_t fw_assisted_mclk_switch;
- uint8_t reserved[6];
+ uint8_t reserved[4];
+ uint8_t subvp_psr_support;
+ uint8_t gecc_enable;
};
struct dmub_visual_confirm_color {
@@ -360,7 +557,7 @@ union dmub_fw_boot_status {
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
uint32_t restore_required : 1; /**< 1 if driver should call restore */
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
- uint32_t reserved : 1;
+ uint32_t fams_enabled : 1; /**< 1 if FAMS is enabled */
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */
} bits; /**< status bits */
@@ -376,6 +573,7 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */
DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */
+ DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled*/
DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/
DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */
};
@@ -395,6 +593,12 @@ enum dmub_lvtma_status_bit {
DMUB_LVTMA_STATUS_BIT_EDP_ON = (1 << 1),
};
+enum dmub_ips_disable_type {
+ DMUB_IPS_DISABLE_IPS1 = 1,
+ DMUB_IPS_DISABLE_IPS2 = 2,
+ DMUB_IPS_DISABLE_IPS2_Z10 = 3,
+};
+
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
@@ -419,7 +623,10 @@ union dmub_fw_boot_options {
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
- uint32_t reserved : 14; /**< reserved */
+ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
+ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
+ uint32_t ips_disable: 2; /* options to disable ips support*/
+ uint32_t reserved : 10; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -554,9 +761,42 @@ enum dmub_gpint_command {
DMUB_GPINT__PSR_RESIDENCY = 9,
/**
+ * DESC: Get REPLAY state from FW.
+ * RETURN: REPLAY state enum. This enum may need to be converted to the legacy REPLAY state value.
+ */
+ DMUB_GPINT__GET_REPLAY_STATE = 13,
+
+ /**
+ * DESC: Start REPLAY residency counter. Stop REPLAY residency counter and get value.
+ * ARGS: We can measure residency from various points. The argument will specify the residency mode.
+ * By default, it is measured from after we powerdown the PHY, to just before we powerup the PHY.
+ * RETURN: REPLAY residency in milli-percent.
+ */
+ DMUB_GPINT__REPLAY_RESIDENCY = 14,
+
+
+ /**
* DESC: Notifies DMCUB detection is done so detection required can be cleared.
*/
DMUB_GPINT__NOTIFY_DETECTION_DONE = 12,
+ /**
+ * DESC: Updates the trace buffer lower 32-bit mask.
+ * ARGS: The new mask
+ * RETURN: Lower 32-bit mask.
+ */
+ DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101,
+ /**
+ * DESC: Updates the trace buffer mask bit0~bit15.
+ * ARGS: The new mask
+ * RETURN: Lower 32-bit mask.
+ */
+ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102,
+ /**
+ * DESC: Updates the trace buffer mask bit16~bit31.
+ * ARGS: The new mask
+ * RETURN: Lower 32-bit mask.
+ */
+ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103,
};
/**
@@ -752,6 +992,11 @@ enum dmub_cmd_type {
*/
/**
+ * Command type used for all REPLAY commands.
+ */
+ DMUB_CMD__REPLAY = 83,
+
+ /**
* Command type used for all SECURE_DISPLAY commands.
*/
DMUB_CMD__SECURE_DISPLAY = 85,
@@ -988,16 +1233,25 @@ struct dmub_rb_cmd_mall {
};
/**
- * enum dmub_cmd_cab_type - TODO:
+ * enum dmub_cmd_cab_type - CAB command data.
*/
enum dmub_cmd_cab_type {
+ /**
+ * No idle optimizations (i.e. no CAB)
+ */
DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION = 0,
+ /**
+ * No DCN requests for memory
+ */
DMUB_CMD__CAB_NO_DCN_REQ = 1,
+ /**
+ * Fit surfaces in CAB (i.e. CAB enable)
+ */
DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2,
};
/**
- * struct dmub_rb_cmd_cab_for_ss - TODO:
+ * struct dmub_rb_cmd_cab_for_ss - CAB command data.
*/
struct dmub_rb_cmd_cab_for_ss {
struct dmub_cmd_header header;
@@ -1005,6 +1259,9 @@ struct dmub_rb_cmd_cab_for_ss {
uint8_t debug_bits; /* debug bits */
};
+/**
+ * Enum indicating the MCLK switch mode used per pipe.
+ */
enum mclk_switch_mode {
NONE = 0,
FPO = 1,
@@ -1125,8 +1382,6 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
- uint8_t d3_entry;
- uint8_t trigger;
uint8_t pad[1];
};
@@ -1889,6 +2144,10 @@ enum dmub_phy_fsm_state {
DMUB_PHY_FSM_PLL_EN,
DMUB_PHY_FSM_TX_EN,
DMUB_PHY_FSM_FAST_LP,
+ DMUB_PHY_FSM_P2_PLL_OFF_CPM,
+ DMUB_PHY_FSM_P2_PLL_OFF_PG,
+ DMUB_PHY_FSM_P2_PLL_OFF,
+ DMUB_PHY_FSM_P2_PLL_ON,
};
/**
@@ -2474,6 +2733,272 @@ struct dmub_cmd_psr_set_power_opt_data {
uint32_t power_opt;
};
+#define REPLAY_RESIDENCY_MODE_SHIFT (0)
+#define REPLAY_RESIDENCY_ENABLE_SHIFT (1)
+
+#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+
+#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
+# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
+# define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
+
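A short sketch of how the mode/enable bits above could be packed into the argument for DMUB_GPINT__REPLAY_RESIDENCY; the wrapper below is illustrative only and assumes nothing beyond the macros defined in this header:

/* Illustrative helper: compose the DMUB_GPINT__REPLAY_RESIDENCY parameter. */
static uint16_t build_replay_residency_arg(bool start, bool alpm_mode)
{
	uint16_t arg = 0;

	/* bit 1: start (enable) the residency counter, or stop it and read the value */
	arg |= start ? REPLAY_RESIDENCY_ENABLE : REPLAY_RESIDENCY_DISABLE;
	/* bit 0: residency measurement mode (PHY by default, ALPM as the alternative) */
	arg |= alpm_mode ? REPLAY_RESIDENCY_MODE_ALPM : REPLAY_RESIDENCY_MODE_PHY;

	return arg;	/* firmware returns residency in milli-percent when stopping */
}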
+enum replay_state {
+ REPLAY_STATE_0 = 0x0,
+ REPLAY_STATE_1 = 0x10,
+ REPLAY_STATE_1A = 0x11,
+ REPLAY_STATE_2 = 0x20,
+ REPLAY_STATE_3 = 0x30,
+ REPLAY_STATE_3INIT = 0x31,
+ REPLAY_STATE_4 = 0x40,
+ REPLAY_STATE_4A = 0x41,
+ REPLAY_STATE_4B = 0x42,
+ REPLAY_STATE_4C = 0x43,
+ REPLAY_STATE_4D = 0x44,
+ REPLAY_STATE_4B_LOCKED = 0x4A,
+ REPLAY_STATE_4C_UNLOCKED = 0x4B,
+ REPLAY_STATE_5 = 0x50,
+ REPLAY_STATE_5A = 0x51,
+ REPLAY_STATE_5B = 0x52,
+ REPLAY_STATE_5A_LOCKED = 0x5A,
+ REPLAY_STATE_5B_UNLOCKED = 0x5B,
+ REPLAY_STATE_6 = 0x60,
+ REPLAY_STATE_6A = 0x61,
+ REPLAY_STATE_6B = 0x62,
+ REPLAY_STATE_INVALID = 0xFF,
+};
+
+/**
+ * Replay command sub-types.
+ */
+enum dmub_cmd_replay_type {
+ /**
+ * Copy driver-calculated parameters to REPLAY state.
+ */
+ DMUB_CMD__REPLAY_COPY_SETTINGS = 0,
+ /**
+ * Enable REPLAY.
+ */
+ DMUB_CMD__REPLAY_ENABLE = 1,
+ /**
+ * Set Replay power option.
+ */
+ DMUB_CMD__SET_REPLAY_POWER_OPT = 2,
+ /**
+ * Set coasting vtotal.
+ */
+ DMUB_CMD__REPLAY_SET_COASTING_VTOTAL = 3,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
+ */
+struct dmub_cmd_replay_copy_settings_data {
+ /**
+ * Flags that can be set by driver to change some replay behaviour.
+ */
+ union replay_debug_flags debug;
+
+ /**
+ * @flags: Flags used to determine feature functionality.
+ */
+ union replay_hw_flags flags;
+
+ /**
+ * DPP HW instance.
+ */
+ uint8_t dpp_inst;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ /**
+ * AUX HW instance.
+ */
+ uint8_t aux_inst;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * @pixel_deviation_per_line: Indicates the maximum pixel deviation per line compared
+ * to the Source timing when the Sink maintains coasting vtotal during Replay normal sleep mode
+ */
+ uint8_t pixel_deviation_per_line;
+ /**
+ * @max_deviation_line: The max number of deviation lines that can keep the timing
+ * synchronized between the Source and Sink during Replay normal sleep mode.
+ */
+ uint8_t max_deviation_line;
+ /**
+ * Length of each horizontal line in ns.
+ */
+ uint32_t line_time_in_ns;
+ /**
+ * PHY instance.
+ */
+ uint8_t dpphy_inst;
+ /**
+ * Determines if SMU optimizations are enabled/disabled.
+ */
+ uint8_t smu_optimizations_en;
+ /**
+ * Determines if timing sync is enabled/disabled.
+ */
+ uint8_t replay_timing_sync_supported;
+ /*
+ * Use FSM state for Replay power up/down
+ */
+ uint8_t use_phy_fsm;
+};
+
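line_time_in_ns above can be derived from the stream timing; the conversion below assumes a pixel clock expressed in 100 Hz units (as elsewhere in DC) and div64_u64() from linux/math64.h, and is only a sketch, not code from this patch:

/* Illustrative only: horizontal line time in ns from h_total and pix_clk_100hz. */
static uint32_t calc_line_time_in_ns(uint32_t h_total, uint32_t pix_clk_100hz)
{
	if (!pix_clk_100hz)
		return 0;

	/* line_time = h_total / (pix_clk_100hz * 100) s = h_total * 10^7 / pix_clk_100hz ns */
	return (uint32_t)div64_u64((uint64_t)h_total * 10000000ULL, pix_clk_100hz);
}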
+/**
+ * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
+ */
+struct dmub_rb_cmd_replay_copy_settings {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
+ */
+ struct dmub_cmd_replay_copy_settings_data replay_copy_settings_data;
+};
+
+/**
+ * Replay disable / enable state for dmub_rb_cmd_replay_enable_data.enable
+ */
+enum replay_enable {
+ /**
+ * Disable REPLAY.
+ */
+ REPLAY_DISABLE = 0,
+ /**
+ * Enable REPLAY.
+ */
+ REPLAY_ENABLE = 1,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command.
+ */
+struct dmub_rb_cmd_replay_enable_data {
+ /**
+ * Replay enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Phy state to enter.
+ * Values to use are defined in dmub_phy_fsm_state
+ */
+ uint8_t phy_fsm_state;
+ /**
+ * Phy rate for DP - RBR/HBR/HBR2/HBR3.
+ * Set this using enum phy_link_rate.
+ * This does not support HDMI/DP2 for now.
+ */
+ uint8_t phy_rate;
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_ENABLE command.
+ * Replay enable/disable is controlled using action in data.
+ */
+struct dmub_rb_cmd_replay_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_rb_cmd_replay_enable_data data;
+};
+
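As a sketch of how the command could be assembled, assuming the usual dmub_cmd_header fields (type, sub_type, payload_bytes), the kernel memset() helper, and the union dmub_rb_cmd member added later in this patch; the instance and PHY values are placeholders:

/* Illustrative example: build a DMUB_CMD__REPLAY_ENABLE command. */
static void fill_replay_enable_cmd(union dmub_rb_cmd *cmd, uint8_t panel_inst)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->replay_enable.header.type = DMUB_CMD__REPLAY;
	cmd->replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
	cmd->replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);

	cmd->replay_enable.data.enable = REPLAY_ENABLE;
	cmd->replay_enable.data.panel_inst = panel_inst;
	cmd->replay_enable.data.phy_fsm_state = DMUB_PHY_FSM_TX_EN;	/* placeholder FSM state */
	cmd->replay_enable.data.phy_rate = 0;				/* placeholder link rate */
}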
+/**
+ * Data passed from driver to FW in a DMUB_CMD__SET_REPLAY_POWER_OPT command.
+ */
+struct dmub_cmd_replay_set_power_opt_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[3];
+ /**
+ * REPLAY power option
+ */
+ uint32_t power_opt;
+};
+
+/**
+ * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
+ */
+struct dmub_rb_cmd_replay_set_power_opt {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__SET_REPLAY_POWER_OPT command.
+ */
+ struct dmub_cmd_replay_set_power_opt_data replay_set_power_opt_data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_coasting_vtotal_data {
+ /**
+ * 16-bit value dictated by the driver that indicates the coasting vtotal.
+ */
+ uint16_t coasting_vtotal;
+ /**
+ * REPLAY control version.
+ */
+ uint8_t cmd_version;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_coasting_vtotal {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+ */
+ struct dmub_cmd_replay_set_coasting_vtotal_data replay_set_coasting_vtotal_data;
+};
+
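A matching sketch for the coasting-vtotal command, again illustrative only and relying on the union dmub_rb_cmd member added later in this patch:

/* Illustrative example: program a new coasting vtotal on panel 0. */
static void fill_replay_coasting_vtotal_cmd(union dmub_rb_cmd *cmd, uint16_t coasting_vtotal)
{
	struct dmub_cmd_replay_set_coasting_vtotal_data *data =
		&cmd->replay_set_coasting_vtotal.replay_set_coasting_vtotal_data;

	memset(cmd, 0, sizeof(*cmd));

	cmd->replay_set_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
	cmd->replay_set_coasting_vtotal.header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
	cmd->replay_set_coasting_vtotal.header.payload_bytes = sizeof(*data);

	data->coasting_vtotal = coasting_vtotal;
	data->panel_inst = 0;	/* only panel 0 or 1 is supported per the comment above */
}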
/**
* Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
*/
@@ -2565,6 +3090,10 @@ enum hw_lock_client {
*/
HW_LOCK_CLIENT_PSR_SU = 1,
/**
+ * Replay is the client of HW Lock Manager.
+ */
+ HW_LOCK_CLIENT_REPLAY = 4,
+ /**
* Invalid client.
*/
HW_LOCK_CLIENT_INVALID = 0xFFFFFFFF,
@@ -2650,6 +3179,12 @@ enum dmub_cmd_abm_type {
* unregister vertical interrupt after steady state is reached
*/
DMUB_CMD__ABM_PAUSE = 6,
+
+ /**
+ * Save and Restore ABM state. On save we save parameters, and
+ * on restore we update state with passed in data.
+ */
+ DMUB_CMD__ABM_SAVE_RESTORE = 7,
};
/**
@@ -3034,6 +3569,7 @@ struct dmub_cmd_abm_pause_data {
uint8_t pad[1];
};
+
/**
* Definition of a DMUB_CMD__ABM_PAUSE command.
*/
@@ -3050,6 +3586,36 @@ struct dmub_rb_cmd_abm_pause {
};
/**
+ * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+ */
+struct dmub_rb_cmd_abm_save_restore {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * OTG hw instance
+ */
+ uint8_t otg_inst;
+
+ /**
+ * Nonzero to freeze (save) the ABM state, zero to restore it
+ */
+ uint8_t freeze;
+
+ /**
+ * Debug bits.
+ */
+ uint8_t debug;
+
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__ABM_INIT_CONFIG command.
+ */
+ struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_cmd_query_feature_caps_data {
@@ -3487,6 +4053,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_pause abm_pause;
/**
+ * Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
+ */
+ struct dmub_rb_cmd_abm_save_restore abm_save_restore;
+
+ /**
* Definition of a DMUB_CMD__DP_AUX_ACCESS command.
*/
struct dmub_rb_cmd_dp_aux_access dp_aux_access;
@@ -3550,6 +4121,26 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
*/
struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
+ /**
+ * Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
+ */
+ struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
+ */
+ struct dmub_rb_cmd_replay_copy_settings replay_copy_settings;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_ENABLE command.
+ */
+ struct dmub_rb_cmd_replay_enable replay_enable;
+ /**
+ * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
+ */
+ struct dmub_rb_cmd_replay_set_power_opt replay_set_power_opt;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_COASTING_VTOTAL command.
+ */
+ struct dmub_rb_cmd_replay_set_coasting_vtotal replay_set_coasting_vtotal;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h
deleted file mode 100644
index 21b02bad696f..000000000000
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_subvp_state.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef DMUB_SUBVP_STATE_H
-#define DMUB_SUBVP_STATE_H
-
-#include "dmub_cmd.h"
-
-#define DMUB_SUBVP_INST0 0
-#define DMUB_SUBVP_INST1 1
-#define SUBVP_MAX_WATERMARK 0xFFFF
-
-struct dmub_subvp_hubp_state {
- uint32_t CURSOR0_0_CURSOR_POSITION;
- uint32_t CURSOR0_0_CURSOR_HOT_SPOT;
- uint32_t CURSOR0_0_CURSOR_DST_OFFSET;
- uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
- uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
- uint32_t CURSOR0_0_CURSOR_SIZE;
- uint32_t CURSOR0_0_CURSOR_CONTROL;
- uint32_t HUBPREQ0_CURSOR_SETTINGS;
- uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
- uint32_t HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
- uint32_t HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
-};
-
-enum subvp_error_code {
- DMUB_SUBVP_INVALID_STATE,
- DMUB_SUBVP_INVALID_TRANSITION,
-};
-
-enum subvp_state {
- DMUB_SUBVP_DISABLED,
- DMUB_SUBVP_IDLE,
- DMUB_SUBVP_TRY_ACQUIRE_LOCKS,
- DMUB_SUBVP_WAIT_FOR_LOCKS,
- DMUB_SUBVP_PRECONFIGURE,
- DMUB_SUBVP_PREPARE,
- DMUB_SUBVP_ENABLE,
- DMUB_SUBVP_SWITCHING,
- DMUB_SUBVP_END,
- DMUB_SUBVP_RESTORE,
-};
-
-/* Defines information for SUBVP to handle vertical interrupts. */
-struct dmub_subvp_vertical_interrupt_event {
- /**
- * @inst: Hardware instance of vertical interrupt.
- */
- uint8_t otg_inst;
-
- /**
- * @pad: Align structure to 4 byte boundary.
- */
- uint8_t pad[3];
-
- enum subvp_state curr_state;
-};
-
-struct dmub_subvp_vertical_interrupt_state {
- /**
- * @events: Event list.
- */
- struct dmub_subvp_vertical_interrupt_event events[DMUB_MAX_STREAMS];
-};
-
-struct dmub_subvp_vline_interrupt_event {
-
- uint8_t hubp_inst;
- uint8_t pad[3];
-};
-
-struct dmub_subvp_vline_interrupt_state {
- struct dmub_subvp_vline_interrupt_event events[DMUB_MAX_PLANES];
-};
-
-struct dmub_subvp_interrupt_ctx {
- struct dmub_subvp_vertical_interrupt_state vertical_int;
- struct dmub_subvp_vline_interrupt_state vline_int;
-};
-
-struct dmub_subvp_pipe_state {
- uint32_t pix_clk_100hz;
- uint16_t main_vblank_start;
- uint16_t main_vblank_end;
- uint16_t mall_region_lines;
- uint16_t prefetch_lines;
- uint16_t prefetch_to_mall_start_lines;
- uint16_t processing_delay_lines;
- uint8_t main_pipe_index;
- uint8_t phantom_pipe_index;
- uint16_t htotal; // htotal for main / phantom pipe
- uint16_t vtotal;
- uint16_t optc_underflow_count;
- uint16_t hubp_underflow_count;
- uint8_t pad[2];
-};
-
-/**
- * struct dmub_subvp_vblank_drr_info - Store DRR state when handling
- * SubVP + VBLANK with DRR multi-display case.
- *
- * The info stored in this struct is only valid if drr_in_use = 1.
- */
-struct dmub_subvp_vblank_drr_info {
- uint8_t drr_in_use;
- uint8_t drr_window_size_ms; // DRR window size -- indicates largest VMIN/VMAX adjustment per frame
- uint16_t min_vtotal_supported; // Min VTOTAL that supports switching in VBLANK
- uint16_t max_vtotal_supported; // Max VTOTAL that can still support SubVP static scheduling requirements
- uint16_t prev_vmin; // Store VMIN value before MCLK switch (used to restore after MCLK end)
- uint16_t prev_vmax; // Store VMAX value before MCLK switch (used to restore after MCLK end)
- uint8_t use_ramping; // Use ramping or not
- uint8_t pad[1];
-};
-
-struct dmub_subvp_vblank_pipe_info {
- uint32_t pix_clk_100hz;
- uint16_t vblank_start;
- uint16_t vblank_end;
- uint16_t vstartup_start;
- uint16_t vtotal;
- uint16_t htotal;
- uint8_t pipe_index;
- uint8_t pad[1];
- struct dmub_subvp_vblank_drr_info drr_info; // DRR considered as part of SubVP + VBLANK case
-};
-
-enum subvp_switch_type {
- DMUB_SUBVP_ONLY, // Used for SubVP only, and SubVP + VACTIVE
- DMUB_SUBVP_AND_SUBVP, // 2 SubVP displays
- DMUB_SUBVP_AND_VBLANK,
- DMUB_SUBVP_AND_FPO,
-};
-
-/* SubVP state. */
-struct dmub_subvp_state {
- struct dmub_subvp_pipe_state pipe_state[DMUB_MAX_SUBVP_STREAMS];
- struct dmub_subvp_interrupt_ctx int_ctx;
- struct dmub_subvp_vblank_pipe_info vblank_info;
- enum subvp_state state; // current state
- enum subvp_switch_type switch_type; // enum take up 4 bytes (?)
- uint8_t mclk_pending;
- uint8_t num_subvp_streams;
- uint8_t vertical_int_margin_us;
- uint8_t pstate_allow_width_us;
- uint32_t subvp_mclk_switch_count;
- uint32_t subvp_wait_lock_count;
- uint32_t driver_wait_lock_count;
- uint32_t subvp_vblank_frame_count;
- uint16_t watermark_a_cache;
- uint8_t pad[2];
-};
-
-#endif /* _DMUB_SUBVP_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index 0589ad4778ee..caf095aca8f3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -22,7 +22,7 @@
DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o
-DMUB += dmub_dcn31.o dmub_dcn315.o dmub_dcn316.o
+DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn32.o
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index a6540e27044d..98dad0d47e72 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
+uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_WPTR);
+}
+
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index c2e5831ac52c..1df128e57ed3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub);
+
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 51bb9bceb1b1..2d212bc974cc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -54,9 +54,3 @@ const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
#undef DMUB_SF
};
-/* Shared functions. */
-
-bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
-{
- return REG_READ(DMCUB_SCRATCH10) == 0;
-}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
index 6fd5b0cd4ef3..8c4033ae4007 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -32,8 +32,4 @@
extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
-/* Hardware functions. */
-
-bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
-
#endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c90b9ee42e12..094e9f864557 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
+uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_WPTR);
+}
+
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
@@ -297,6 +302,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
return supported;
}
+bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
+{
+ return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
+}
+
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg)
{
@@ -342,6 +352,14 @@ union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub)
return status;
}
+union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub)
+{
+ union dmub_fw_boot_options option;
+
+ option.all = REG_READ(DMCUB_SCRATCH14);
+ return option;
+}
+
void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index f6db6f89d45d..4d520a893c7b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub);
+
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
@@ -219,6 +221,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn31_is_supported(struct dmub_srv *dmub);
+bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub);
+
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);
@@ -235,6 +239,8 @@ void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub);
+union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub);
+
void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub,
const struct dmub_region *outbox0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
new file mode 100644
index 000000000000..f161aeb7e7c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn314.h"
+
+#include "dcn/dcn_3_1_4_offset.h"
+#include "dcn/dcn_3_1_4_sh_mask.h"
+
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define DCN_BASE__INST0_SEG2 0x000034C0
+#define DCN_BASE__INST0_SEG3 0x00009000
+#define DCN_BASE__INST0_SEG4 0x02403C00
+#define DCN_BASE__INST0_SEG5 0
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs_dcn31
+#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
+
+/* Registers. */
+
+const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
+#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
+ {
+ DMUB_DCN31_REGS()
+ DMCUB_INTERNAL_REGS()
+ },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_DCN31_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_DCN31_FIELDS() },
+#undef DMUB_SF
+};
+
+bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub)
+{
+ return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
new file mode 100644
index 000000000000..f213bd82c911
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_DCN314_H_
+#define _DMUB_DCN314_H_
+
+#include "dmub_dcn31.h"
+
+extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
+
+bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub);
+
+#endif /* _DMUB_DCN314_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 9c20516be066..bf5994e292d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -116,10 +116,6 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
break;
}
- /* Clear the GPINT command manually so we don't reset again. */
- cmd.all = 0;
- dmub->hw_funcs.set_gpint(dmub, cmd);
-
/* Force reset in case we timed out, DMCUB is likely hung. */
}
@@ -133,6 +129,10 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
+
+ /* Clear the GPINT command manually so we don't reset again. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
}
void dmub_dcn32_reset_release(struct dmub_srv *dmub)
@@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
+uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_WPTR);
+}
+
uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
@@ -434,6 +439,7 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
@@ -464,6 +470,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+
+ diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 7d1a6eb4d665..d58a1e4b9f1c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -107,6 +107,7 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH15) \
DMUB_SR(DMCUB_SCRATCH16) \
DMUB_SR(DMCUB_SCRATCH17) \
+ DMUB_SR(DMCUB_GPINT_DATAIN0) \
DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(DMCUB_GPINT_DATAOUT) \
DMUB_SR(CC_DC_PIPE_DIS) \
@@ -206,6 +207,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub);
+
uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 92c18bfb98b3..93624ffe4eb8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -32,6 +32,7 @@
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
#include "dmub_dcn31.h"
+#include "dmub_dcn314.h"
#include "dmub_dcn315.h"
#include "dmub_dcn316.h"
#include "dmub_dcn32.h"
@@ -166,6 +167,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load = dmub_dcn20_backdoor_load;
funcs->setup_windows = dmub_dcn20_setup_windows;
funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
+ funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
@@ -190,11 +192,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;
- if (asic == DMUB_ASIC_DCN21) {
+ if (asic == DMUB_ASIC_DCN21)
dmub->regs = &dmub_srv_dcn21_regs;
- funcs->is_phy_init = dmub_dcn21_is_phy_init;
- }
if (asic == DMUB_ASIC_DCN30) {
dmub->regs = &dmub_srv_dcn30_regs;
@@ -226,17 +226,23 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
case DMUB_ASIC_DCN314:
case DMUB_ASIC_DCN315:
case DMUB_ASIC_DCN316:
- if (asic == DMUB_ASIC_DCN315)
+ if (asic == DMUB_ASIC_DCN314) {
+ dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
+ funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
+ } else if (asic == DMUB_ASIC_DCN315) {
dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
- else if (asic == DMUB_ASIC_DCN316)
+ } else if (asic == DMUB_ASIC_DCN316) {
dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
- else
+ } else {
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
+ funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
+ }
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
funcs->backdoor_load = dmub_dcn31_backdoor_load;
funcs->setup_windows = dmub_dcn31_setup_windows;
funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
+ funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
@@ -249,6 +255,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;
funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
+ funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option;
funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
//outbox0 call stacks
@@ -275,6 +282,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
funcs->setup_windows = dmub_dcn32_setup_windows;
funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
+ funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
@@ -632,11 +640,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.enable_dmub_boot_options)
dmub->hw_funcs.enable_dmub_boot_options(dmub, params);
- if (dmub->hw_funcs.skip_dmub_panel_power_sequence)
+ if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub,
params->skip_panel_power_sequence);
- if (dmub->hw_funcs.reset_release)
+ if (dmub->hw_funcs.reset_release && !dmub->is_virtual)
dmub->hw_funcs.reset_release(dmub);
dmub->hw_init = true;
@@ -644,6 +652,20 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
+enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ }
+
+ return DMUB_STATUS_OK;
+}
+
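dmub_srv_sync_inbox1() above lets the driver re-read the hardware ring pointers instead of assuming they start at zero, e.g. when DMCUB state survived a suspend. The ordering in the sketch below is illustrative only:

/* Illustrative resume flow: re-sync inbox1 pointers before queueing new commands. */
static enum dmub_status example_dmub_resume(struct dmub_srv *dmub,
					    const struct dmub_srv_hw_params *params)
{
	enum dmub_status status;

	status = dmub_srv_hw_init(dmub, params);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Pick up the INBOX1 read/write pointers the firmware left in the registers. */
	return dmub_srv_sync_inbox1(dmub);
}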
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -721,27 +743,6 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
-enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
- uint32_t timeout_us)
-{
- uint32_t i = 0;
-
- if (!dmub->hw_init)
- return DMUB_STATUS_INVALID;
-
- if (!dmub->hw_funcs.is_phy_init)
- return DMUB_STATUS_OK;
-
- for (i = 0; i <= timeout_us; i += 10) {
- if (dmub->hw_funcs.is_phy_init(dmub))
- return DMUB_STATUS_OK;
-
- udelay(10);
- }
-
- return DMUB_STATUS_TIMEOUT;
-}
-
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
@@ -846,6 +847,32 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
+enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
+ union dmub_fw_boot_options *option)
+{
+ option->all = 0;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_fw_boot_option)
+ *option = dmub->hw_funcs.get_fw_boot_option(dmub);
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
+ bool skip)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
+ dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip);
+
+ return DMUB_STATUS_OK;
+}
+
enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index f843fc497855..68dfc7968017 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -40,6 +40,7 @@
#define DP_BRANCH_HW_REV_20 0x20
#define DP_DEVICE_ID_38EC11 0x38EC11
+#define DP_DEVICE_ID_BA4159 0xBA4159
#define DP_FORCE_PSRSU_CAPABILITY 0x40F
#define DP_SINK_PSR_ACTIVE_VTOTAL 0x373
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index c062a44db078..914f28e9f224 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -172,6 +172,9 @@ enum dpcd_psr_sink_states {
#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
-#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340
+#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340
+#define DP_SINK_PR_REPLAY_STATUS 0x378
+#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
+#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index ece97ae0e826..d4cf7ead1d87 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -525,7 +525,7 @@ static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigne
if (negative)
arg.value = -arg.value;
- arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
+ arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
if (negative)
arg.value = -arg.value;
return arg;
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index cd870af5fd25..1b8ab20f1715 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -53,7 +53,7 @@ enum {
BITS_PER_DP_BYTE = 10,
DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */
DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */
- DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
+ DATA_EFFICIENCY_128b_132b_x10000 = 9641, /* 96.71% data efficiency x 99.7% downspread factor */
};
enum lttpr_mode {
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index 23a308c3eccb..325c5ba4c82a 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -44,6 +44,34 @@ enum signal_type {
SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */
};
+static inline const char *signal_type_to_string(const int type)
+{
+ switch (type) {
+ case SIGNAL_TYPE_NONE:
+ return "No signal";
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return "DVI: Single Link";
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return "DVI: Dual Link";
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return "HDMI: TYPE A";
+ case SIGNAL_TYPE_LVDS:
+ return "LVDS";
+ case SIGNAL_TYPE_RGB:
+ return "RGB";
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ return "Display Port";
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return "Display Port: MST";
+ case SIGNAL_TYPE_EDP:
+ return "Embedded Display Port";
+ case SIGNAL_TYPE_VIRTUAL:
+ return "Virtual";
+ default:
+ return "Unknown";
+ }
+}
+
/* help functions for signal types manipulation */
static inline bool dc_is_hdmi_tmds_signal(enum signal_type signal)
{
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 67a062af3ab0..ff8e5708735d 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -359,7 +359,7 @@ static struct fixed31_32 translate_from_linear_space(
scratch_1 = dc_fixpt_add(one, args->a3);
/* In the first region (first 16 points) and in the
* region delimited by START/END we calculate with
- * full precision to avoid error accumulation.
+ * full precision to avoid error accumulation.
*/
if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START &&
cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) ||
@@ -379,8 +379,7 @@ static struct fixed31_32 translate_from_linear_space(
scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
return scratch_1;
- }
- else
+ } else
return dc_fixpt_mul(args->arg, args->a1);
}
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 5c41a4751db4..dbd60811f95d 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -989,6 +989,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
unsigned int refresh_range = 0;
unsigned long long min_refresh_in_uhz = 0;
unsigned long long max_refresh_in_uhz = 0;
+ unsigned long long min_hardware_refresh_in_uhz = 0;
if (mod_freesync == NULL)
return;
@@ -999,7 +1000,13 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
- min_refresh_in_uhz = in_config->min_refresh_in_uhz;
+ if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
+ min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
+ (stream->timing.h_total * stream->ctx->dc->caps.max_v_total));
+ }
+ /* Limit minimum refresh rate to what can be supported by hardware */
+ min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?
+ min_hardware_refresh_in_uhz : in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
/* Full range may be larger than current video timing, so cap at nominal */
@@ -1137,10 +1144,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
if (in_out_vrr->supported &&
in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
- unsigned int oldest_index = plane->time.index + 1;
-
- if (oldest_index >= DC_PLANE_UPDATE_TIMES_MAX)
- oldest_index = 0;
last_render_time_in_us = curr_time_stamp_in_us -
plane->time.prev_update_time_in_us;
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index ec64f19e1786..84f9b412a4f1 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -149,6 +149,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->replay_settings.config.replay_supported)
+ vsc_packet_revision = vsc_packet_rev4;
else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
vsc_packet_revision = vsc_packet_rev2;
@@ -536,6 +538,9 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
case FREESYNC_TYPE_PCON_IN_WHITELIST:
mod_build_adaptive_sync_infopacket_v1(info_packet);
break;
+ case ADAPTIVE_SYNC_TYPE_EDP:
+ mod_build_adaptive_sync_infopacket_v1(info_packet);
+ break;
case ADAPTIVE_SYNC_TYPE_NONE:
case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST:
default:
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 51e76bce92ea..73a2b37fbbd7 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -116,6 +116,27 @@ static const struct abm_parameters * const abm_settings[] = {
abm_settings_config2,
};
+static const struct dm_bl_data_point custom_backlight_curve0[] = {
+ {2, 14}, {4, 16}, {6, 18}, {8, 21}, {10, 23}, {12, 26}, {14, 29}, {16, 32}, {18, 35},
+ {20, 38}, {22, 41}, {24, 44}, {26, 48}, {28, 52}, {30, 55}, {32, 59}, {34, 62},
+ {36, 67}, {38, 71}, {40, 75}, {42, 80}, {44, 84}, {46, 88}, {48, 93}, {50, 98},
+ {52, 103}, {54, 108}, {56, 113}, {58, 118}, {60, 123}, {62, 129}, {64, 135}, {66, 140},
+ {68, 146}, {70, 152}, {72, 158}, {74, 164}, {76, 171}, {78, 177}, {80, 183}, {82, 190},
+ {84, 197}, {86, 204}, {88, 211}, {90, 218}, {92, 225}, {94, 232}, {96, 240}, {98, 247}};
+
+struct custom_backlight_profile {
+ uint8_t ac_level_percentage;
+ uint8_t dc_level_percentage;
+ uint8_t min_input_signal;
+ uint8_t max_input_signal;
+ uint8_t num_data_points;
+ const struct dm_bl_data_point *data_points;
+};
+
+static const struct custom_backlight_profile custom_backlight_profiles[] = {
+ {100, 32, 12, 255, ARRAY_SIZE(custom_backlight_curve0), custom_backlight_curve0},
+};
+
#define NUM_AMBI_LEVEL 5
#define NUM_AGGR_LEVEL 4
#define NUM_POWER_FN_SEGS 8
@@ -905,6 +926,11 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
!link->dpcd_caps.psr_info.psr_dpcd_caps.bits.LINK_TRAINING_ON_EXIT_NOT_REQUIRED;
}
+void init_replay_config(struct dc_link *link, struct replay_config *pr_config)
+{
+ link->replay_settings.config = *pr_config;
+}
+
bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_state *stream)
{
return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
@@ -944,3 +970,25 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+
+bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
+{
+ unsigned int data_points_size;
+
+ if (config_no >= ARRAY_SIZE(custom_backlight_profiles))
+ return false;
+
+ data_points_size = custom_backlight_profiles[config_no].num_data_points
+ * sizeof(custom_backlight_profiles[config_no].data_points[0]);
+
+ caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size;
+ caps->flags = 0;
+ caps->error_code = 0;
+ caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage;
+ caps->dc_level_percentage = custom_backlight_profiles[config_no].dc_level_percentage;
+ caps->min_input_signal = custom_backlight_profiles[config_no].min_input_signal;
+ caps->max_input_signal = custom_backlight_profiles[config_no].max_input_signal;
+ caps->num_data_points = custom_backlight_profiles[config_no].num_data_points;
+ memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
+ return true;
+}
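A sketch of how a caller might consume the new helper; the caller name is illustrative and struct dm_acpi_atif_backlight_caps is assumed to come from the DM ACPI/ATIF definitions it references:

/* Illustrative caller: fetch custom backlight profile 0 for ATIF reporting. */
static bool example_get_custom_backlight(struct dm_acpi_atif_backlight_caps *caps)
{
	/* config_no indexes custom_backlight_profiles[]; only profile 0 exists so far. */
	if (!fill_custom_backlight_caps(0, caps))
		return false;

	/* caps->size, the AC/DC levels, input signal range and data_points are now filled in. */
	return true;
}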
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 1d3079e56799..d9e0d67d67f7 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -53,6 +53,8 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
struct dmcu_iram_parameters params,
unsigned int inst);
+void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
+
bool is_psr_su_specific_panel(struct dc_link *link);
void mod_power_calc_psr_configs(struct psr_config *psr_config,
struct dc_link *link,
@@ -62,4 +64,7 @@ bool mod_power_only_edp(const struct dc_state *context,
bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
struct dc_stream_state *stream,
struct psr_config *config);
+
+bool fill_custom_backlight_caps(unsigned int config_no,
+ struct dm_acpi_atif_backlight_caps *caps);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index e4a22c68517d..abe829bbd54a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,7 +240,6 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default
};
enum DC_DEBUG_MASK {
@@ -251,6 +250,7 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
+ DC_ENABLE_DPIA_TRACE = 0x80,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
index 537aee0536d3..f2f8f9b39c6b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
@@ -15805,6 +15805,11 @@
#define mmDME6_DME_MEMORY_CONTROL 0x093d
#define mmDME6_DME_MEMORY_CONTROL_BASE_IDX 3
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+// base address: 0x0
+#define mmHPO_TOP_CLOCK_CONTROL 0x0e43
+#define mmHPO_TOP_CLOCK_CONTROL_BASE_IDX 3
+
// base address: 0x1a698
#define mmDC_PERFMON29_PERFCOUNTER_CNTL 0x0e66
#define mmDC_PERFMON29_PERFCOUNTER_CNTL_BASE_IDX 3
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
index f9d90b098519..e0a447351623 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
@@ -60666,7 +60666,12 @@
#define DME6_DME_MEMORY_CONTROL__DME_MEM_PWR_STATE_MASK 0x00000300L
#define DME6_DME_MEMORY_CONTROL__DME_MEM_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+//HPO_TOP_CLOCK_CONTROL
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS__SHIFT 0x9
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS_MASK 0x00000200L
+// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON29_PERFCOUNTER_CNTL
#define DC_PERFMON29_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
#define DC_PERFMON29_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
index 476469d41d73..b45a35aae241 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h
@@ -14205,6 +14205,10 @@
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+// base address: 0x0
+#define mmHPO_TOP_CLOCK_CONTROL 0x0e43
+#define mmHPO_TOP_CLOCK_CONTROL_BASE_IDX 3
// base address: 0x1a698
#define mmDC_PERFMON26_PERFCOUNTER_CNTL 0x0e66
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
index b9de0ebc8b03..3dae29f9581e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h
@@ -52401,7 +52401,10 @@
#define DC_PERFMON25_PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
#define DC_PERFMON25_PERFMON_LOW__PERFMON_LOW_MASK 0xFFFFFFFFL
-
+// addressBlock: dce_dc_hpo_hpo_top_dispdec
+//HPO_TOP_CLOCK_CONTROL
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS__SHIFT 0x9
+#define HPO_TOP_CLOCK_CONTROL__HPO_HDMISTREAMCLK_GATE_DIS_MASK 0x00000200L
// addressBlock: dce_dc_hpo_hpo_dcperfmon_dc_perfmon_dispdec
//DC_PERFMON26_PERFCOUNTER_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
index 79c41004c0b6..4908044f7409 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h
@@ -5194,6 +5194,20 @@
#define mmSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0
#define mmSPI_WCL_PIPE_PERCENT_CS7 0x1f70
#define mmSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL 0x1f71
+#define mmSPI_GDBG_WAVE_CNTL_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_CONFIG 0x1f72
+#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_MASK 0x1f73
+#define mmSPI_GDBG_TRAP_MASK_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL2 0x1f74
+#define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL3 0x1f75
+#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_DATA0 0x1f78
+#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_DATA1 0x1f79
+#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX 0
#define mmSPI_COMPUTE_QUEUE_RESET 0x1f7b
#define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
#define mmSPI_RESOURCE_RESERVE_CU_0 0x1f7c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index 52043e143067..9b7d219e7954 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -19700,6 +19700,75 @@
//SPI_WCL_PIPE_PERCENT_CS7
#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_VMID__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL__STALL_VMID_MASK 0x0001FFFEL
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__ME_SEL__SHIFT 0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL__SHIFT 0x2
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL__SHIFT 0x4
+#define SPI_GDBG_TRAP_CONFIG__ME_MATCH__SHIFT 0x7
+#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH__SHIFT 0x8
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH__SHIFT 0x9
+#define SPI_GDBG_TRAP_CONFIG__TRAP_EN__SHIFT 0xf
+#define SPI_GDBG_TRAP_CONFIG__VMID_SEL__SHIFT 0x10
+#define SPI_GDBG_TRAP_CONFIG__ME_SEL_MASK 0x00000003L
+#define SPI_GDBG_TRAP_CONFIG__PIPE_SEL_MASK 0x0000000CL
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_SEL_MASK 0x00000070L
+#define SPI_GDBG_TRAP_CONFIG__ME_MATCH_MASK 0x00000080L
+#define SPI_GDBG_TRAP_CONFIG__PIPE_MATCH_MASK 0x00000100L
+#define SPI_GDBG_TRAP_CONFIG__QUEUE_MATCH_MASK 0x00000200L
+#define SPI_GDBG_TRAP_CONFIG__TRAP_EN_MASK 0x00008000L
+#define SPI_GDBG_TRAP_CONFIG__VMID_SEL_MASK 0xFFFF0000L
+//SPI_GDBG_TRAP_MASK
+#define SPI_GDBG_TRAP_MASK__EXCP_EN__SHIFT 0x0
+#define SPI_GDBG_TRAP_MASK__REPLACE__SHIFT 0x9
+#define SPI_GDBG_TRAP_MASK__EXCP_EN_MASK 0x01FFL
+#define SPI_GDBG_TRAP_MASK__REPLACE_MASK 0x0200L
+//SPI_GDBG_WAVE_CNTL2
+#define SPI_GDBG_WAVE_CNTL2__VMID_MASK__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL2__MODE__SHIFT 0x10
+#define SPI_GDBG_WAVE_CNTL2__VMID_MASK_MASK 0x0000FFFFL
+#define SPI_GDBG_WAVE_CNTL2__MODE_MASK 0x00030000L
+//SPI_GDBG_WAVE_CNTL3
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL3__STALL_VS__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL3__STALL_VS_MASK 0x00000002L
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L
+//SPI_GDBG_TRAP_DATA0
+#define SPI_GDBG_TRAP_DATA0__DATA__SHIFT 0x0
+#define SPI_GDBG_TRAP_DATA0__DATA_MASK 0xFFFFFFFFL
+//SPI_GDBG_TRAP_DATA1
+#define SPI_GDBG_TRAP_DATA1__DATA__SHIFT 0x0
+#define SPI_GDBG_TRAP_DATA1__DATA_MASK 0xFFFFFFFFL
//SPI_COMPUTE_QUEUE_RESET
#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index a734abaa91a5..5e15ac14b63c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -26,6 +26,8 @@
#define mmSQ_DEBUG_STS_GLOBAL_BASE_IDX 0
#define mmSQ_DEBUG_STS_GLOBAL2 0x10B0
#define mmSQ_DEBUG_STS_GLOBAL2_BASE_IDX 0
+#define mmSQ_DEBUG 0x10B1
+#define mmSQ_DEBUG_BASE_IDX 0
// addressBlock: gc_sdma0_sdma0dec
// base address: 0x4980
@@ -4853,10 +4855,18 @@
#define mmSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0
#define mmSPI_GDBG_WAVE_CNTL 0x1f71
#define mmSPI_GDBG_WAVE_CNTL_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_CONFIG 0x1f72
+#define mmSPI_GDBG_TRAP_CONFIG_BASE_IDX 0
#define mmSPI_GDBG_TRAP_MASK 0x1f73
#define mmSPI_GDBG_TRAP_MASK_BASE_IDX 0
#define mmSPI_GDBG_WAVE_CNTL2 0x1f74
#define mmSPI_GDBG_WAVE_CNTL2_BASE_IDX 0
+#define mmSPI_GDBG_WAVE_CNTL3 0x1f75
+#define mmSPI_GDBG_WAVE_CNTL3_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_DATA0 0x1f78
+#define mmSPI_GDBG_TRAP_DATA0_BASE_IDX 0
+#define mmSPI_GDBG_TRAP_DATA1 0x1f79
+#define mmSPI_GDBG_TRAP_DATA1_BASE_IDX 0
#define mmSPI_COMPUTE_QUEUE_RESET 0x1f7b
#define mmSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
#define mmSPI_RESOURCE_RESERVE_CU_0 0x1f7c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index d7a17bae2584..e4ecd6c2d20e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -47907,6 +47907,10 @@
// addressBlock: sqind
+//SQ_DEBUG
+#define SQ_DEBUG__SINGLE_MEMOP_MASK 0x00000001L
+#define SQ_DEBUG__SINGLE_MEMOP__SHIFT 0x00000000
+
//SQ_DEBUG_STS_GLOBAL
#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0_MASK 0x000000ffL
#define SQ_DEBUG_STS_GLOBAL2__FIFO_LEVEL_GFX0__SHIFT 0x00000000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
index 4f08f90856fc..3088a4a13cb5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h
@@ -17216,11 +17216,15 @@
#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT 0x3
#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT 0x4
#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT 0xd
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START__SHIFT 0xe
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END__SHIFT 0xf
#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK 0x00000001L
#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK 0x00000006L
#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK 0x00000008L
#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK 0x00001FF0L
#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK 0x00002000L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_START_MASK 0x00004000L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_ON_END_MASK 0x00008000L
//SPI_COMPUTE_QUEUE_RESET
#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h
index 3100de8b3881..393963502b7a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h
@@ -705,6 +705,46 @@
#define regSQC_ICACHE_UTCL1_STATUS_BASE_IDX 0
#define regSQC_DCACHE_UTCL1_STATUS 0x03d8
#define regSQC_DCACHE_UTCL1_STATUS_BASE_IDX 0
+#define regSQC_UE_EDC_LO 0x03d9
+#define regSQC_UE_EDC_LO_BASE_IDX 0
+#define regSQC_UE_EDC_HI 0x03da
+#define regSQC_UE_EDC_HI_BASE_IDX 0
+#define regSQC_CE_EDC_LO 0x03db
+#define regSQC_CE_EDC_LO_BASE_IDX 0
+#define regSQC_CE_EDC_HI 0x03dc
+#define regSQC_CE_EDC_HI_BASE_IDX 0
+#define regSQ_UE_ERR_STATUS_LO 0x03dd
+#define regSQ_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regSQ_UE_ERR_STATUS_HI 0x03de
+#define regSQ_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regSQ_CE_ERR_STATUS_LO 0x03df
+#define regSQ_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regSQ_CE_ERR_STATUS_HI 0x03e0
+#define regSQ_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regLDS_UE_ERR_STATUS_LO 0x03e1
+#define regLDS_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regLDS_UE_ERR_STATUS_HI 0x03e2
+#define regLDS_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regLDS_CE_ERR_STATUS_LO 0x03e3
+#define regLDS_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regLDS_CE_ERR_STATUS_HI 0x03e4
+#define regLDS_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regSP0_UE_ERR_STATUS_LO 0x03e5
+#define regSP0_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regSP0_UE_ERR_STATUS_HI 0x03e6
+#define regSP0_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regSP0_CE_ERR_STATUS_LO 0x03e7
+#define regSP0_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regSP0_CE_ERR_STATUS_HI 0x03e8
+#define regSP0_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regSP1_UE_ERR_STATUS_LO 0x03e9
+#define regSP1_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regSP1_UE_ERR_STATUS_HI 0x03ea
+#define regSP1_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regSP1_CE_ERR_STATUS_LO 0x03eb
+#define regSP1_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regSP1_CE_ERR_STATUS_HI 0x03ec
+#define regSP1_CE_ERR_STATUS_HI_BASE_IDX 0


// addressBlock: xcd0_gc_shsdec
@@ -727,6 +767,14 @@
#define regSPI_DSM_CNTL2_BASE_IDX 0
#define regSPI_EDC_CNT 0x0445
#define regSPI_EDC_CNT_BASE_IDX 0
+#define regSPI_UE_ERR_STATUS_LO 0x0446
+#define regSPI_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regSPI_UE_ERR_STATUS_HI 0x0447
+#define regSPI_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regSPI_CE_ERR_STATUS_LO 0x0448
+#define regSPI_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regSPI_CE_ERR_STATUS_HI 0x0449
+#define regSPI_CE_ERR_STATUS_HI_BASE_IDX 0
#define regSPI_DEBUG_BUSY 0x0450
#define regSPI_DEBUG_BUSY_BASE_IDX 0
#define regSPI_CONFIG_PS_CU_EN 0x0452
@@ -871,6 +919,14 @@
#define regTD_STATUS_BASE_IDX 0
#define regTD_POWER_CNTL 0x052a
#define regTD_POWER_CNTL_BASE_IDX 0
+#define regTD_UE_EDC_LO 0x052b
+#define regTD_UE_EDC_LO_BASE_IDX 0
+#define regTD_UE_EDC_HI 0x052c
+#define regTD_UE_EDC_HI_BASE_IDX 0
+#define regTD_CE_EDC_LO 0x052d
+#define regTD_CE_EDC_LO_BASE_IDX 0
+#define regTD_CE_EDC_HI 0x052e
+#define regTD_CE_EDC_HI_BASE_IDX 0
#define regTD_DSM_CNTL 0x052f
#define regTD_DSM_CNTL_BASE_IDX 0
#define regTD_DSM_CNTL2 0x0530
@@ -893,6 +949,14 @@
#define regTA_DSM_CNTL_BASE_IDX 0
#define regTA_DSM_CNTL2 0x0585
#define regTA_DSM_CNTL2_BASE_IDX 0
+#define regTA_UE_EDC_LO 0x0587
+#define regTA_UE_EDC_LO_BASE_IDX 0
+#define regTA_UE_EDC_HI 0x0588
+#define regTA_UE_EDC_HI_BASE_IDX 0
+#define regTA_CE_EDC_LO 0x0589
+#define regTA_CE_EDC_LO_BASE_IDX 0
+#define regTA_CE_EDC_HI 0x058a
+#define regTA_CE_EDC_HI_BASE_IDX 0


// addressBlock: xcd0_gc_gdsdec
@@ -923,6 +987,14 @@
#define regGDS_DSM_CNTL2_BASE_IDX 0
#define regGDS_WD_GDS_CSB 0x05ce
#define regGDS_WD_GDS_CSB_BASE_IDX 0
+#define regGDS_UE_ERR_STATUS_LO 0x05cf
+#define regGDS_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regGDS_UE_ERR_STATUS_HI 0x05d0
+#define regGDS_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regGDS_CE_ERR_STATUS_LO 0x05d1
+#define regGDS_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regGDS_CE_ERR_STATUS_HI 0x05d2
+#define regGDS_CE_ERR_STATUS_HI_BASE_IDX 0


// addressBlock: xcd0_gc_rbdec
@@ -1243,6 +1315,10 @@
#define regGCEA_MAM_CTRL_BASE_IDX 0
#define regGCEA_MAM_CTRL2 0x0702
#define regGCEA_MAM_CTRL2_BASE_IDX 0
+#define regGCEA_UE_ERR_STATUS_LO 0x0706
+#define regGCEA_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regGCEA_UE_ERR_STATUS_HI 0x0707
+#define regGCEA_UE_ERR_STATUS_HI_BASE_IDX 0
#define regGCEA_DSM_CNTL 0x0708
#define regGCEA_DSM_CNTL_BASE_IDX 0
#define regGCEA_DSM_CNTLA 0x0709
@@ -1277,6 +1353,10 @@
#define regGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0
#define regGCEA_SDP_BACKDOOR_MISCCREDITS 0x0719
#define regGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0
+#define regGCEA_CE_ERR_STATUS_LO 0x071b
+#define regGCEA_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regGCEA_CE_ERR_STATUS_HI 0x071d
+#define regGCEA_CE_ERR_STATUS_HI_BASE_IDX 0
#define regGCEA_SDP_ENABLE 0x071f
#define regGCEA_SDP_ENABLE_BASE_IDX 0
@@ -1389,6 +1469,14 @@
#define regATC_L2_CNTL4_BASE_IDX 0
#define regATC_L2_MM_GROUP_RT_CLASSES 0x0816
#define regATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define regATC_L2_UE_ERR_STATUS_LO 0x081a
+#define regATC_L2_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regATC_L2_UE_ERR_STATUS_HI 0x081b
+#define regATC_L2_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regATC_L2_CE_ERR_STATUS_LO 0x081c
+#define regATC_L2_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regATC_L2_CE_ERR_STATUS_HI 0x081d
+#define regATC_L2_CE_ERR_STATUS_HI_BASE_IDX 0


// addressBlock: xcd0_gc_utcl2_vml2pfdec
@@ -1475,6 +1563,30 @@
#define regUTCL2_EDC_MODE_BASE_IDX 0
#define regUTCL2_EDC_CONFIG 0x084c
#define regUTCL2_EDC_CONFIG_BASE_IDX 0
+#define regVML2_UE_ERR_STATUS_LO 0x084d
+#define regVML2_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regVML2_WALKER_UE_ERR_STATUS_LO 0x084e
+#define regVML2_WALKER_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regUTCL2_UE_ERR_STATUS_LO 0x084f
+#define regUTCL2_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regVML2_UE_ERR_STATUS_HI 0x0850
+#define regVML2_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regVML2_WALKER_UE_ERR_STATUS_HI 0x0851
+#define regVML2_WALKER_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regUTCL2_UE_ERR_STATUS_HI 0x0852
+#define regUTCL2_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regVML2_CE_ERR_STATUS_LO 0x0853
+#define regVML2_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regVML2_WALKER_CE_ERR_STATUS_LO 0x0854
+#define regVML2_WALKER_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regUTCL2_CE_ERR_STATUS_LO 0x0855
+#define regUTCL2_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regVML2_CE_ERR_STATUS_HI 0x0856
+#define regVML2_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regVML2_WALKER_CE_ERR_STATUS_HI 0x0857
+#define regVML2_WALKER_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regUTCL2_CE_ERR_STATUS_HI 0x0858
+#define regUTCL2_CE_ERR_STATUS_HI_BASE_IDX 0


// addressBlock: xcd0_gc_utcl2_vml2vcdec
@@ -2011,6 +2123,22 @@
#define regTC_CFG_L1_VOLATILE_BASE_IDX 0
#define regTC_CFG_L2_VOLATILE 0x0b23
#define regTC_CFG_L2_VOLATILE_BASE_IDX 0
+#define regTCP_UE_EDC_HI_REG 0x0b54
+#define regTCP_UE_EDC_HI_REG_BASE_IDX 0
+#define regTCP_UE_EDC_LO_REG 0x0b55
+#define regTCP_UE_EDC_LO_REG_BASE_IDX 0
+#define regTCP_CE_EDC_HI_REG 0x0b56
+#define regTCP_CE_EDC_HI_REG_BASE_IDX 0
+#define regTCP_CE_EDC_LO_REG 0x0b57
+#define regTCP_CE_EDC_LO_REG_BASE_IDX 0
+#define regTCI_UE_EDC_HI_REG 0x0b58
+#define regTCI_UE_EDC_HI_REG_BASE_IDX 0
+#define regTCI_UE_EDC_LO_REG 0x0b59
+#define regTCI_UE_EDC_LO_REG_BASE_IDX 0
+#define regTCI_CE_EDC_HI_REG 0x0b5a
+#define regTCI_CE_EDC_HI_REG_BASE_IDX 0
+#define regTCI_CE_EDC_LO_REG 0x0b5b
+#define regTCI_CE_EDC_LO_REG_BASE_IDX 0
#define regTCI_MISC 0x0b5c
#define regTCI_MISC_BASE_IDX 0
#define regTCI_CNTL_3 0x0b5d
@@ -2061,6 +2189,26 @@
#define regTCX_DSM_CNTL_BASE_IDX 0
#define regTCX_DSM_CNTL2 0x0bc8
#define regTCX_DSM_CNTL2_BASE_IDX 0
+#define regTCA_UE_ERR_STATUS_LO 0x0bc9
+#define regTCA_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regTCA_UE_ERR_STATUS_HI 0x0bca
+#define regTCA_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regTCX_UE_ERR_STATUS_LO 0x0bcb
+#define regTCX_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regTCX_UE_ERR_STATUS_HI 0x0bcc
+#define regTCX_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regTCX_CE_ERR_STATUS_LO 0x0bcd
+#define regTCX_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regTCX_CE_ERR_STATUS_HI 0x0bce
+#define regTCX_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regTCC_UE_ERR_STATUS_LO 0x0bcf
+#define regTCC_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regTCC_UE_ERR_STATUS_HI 0x0bd0
+#define regTCC_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regTCC_CE_ERR_STATUS_LO 0x0bd1
+#define regTCC_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regTCC_CE_ERR_STATUS_HI 0x0bd2
+#define regTCC_CE_ERR_STATUS_HI_BASE_IDX 0


// addressBlock: xcd0_gc_shdec
@@ -2905,6 +3053,30 @@
#define regCP_MEC2_F32_INT_DIS_BASE_IDX 0
#define regCP_VMID_STATUS 0x10bf
#define regCP_VMID_STATUS_BASE_IDX 0
+#define regCPC_UE_ERR_STATUS_LO 0x10e0
+#define regCPC_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regCPC_UE_ERR_STATUS_HI 0x10e1
+#define regCPC_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regCPC_CE_ERR_STATUS_LO 0x10e2
+#define regCPC_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regCPC_CE_ERR_STATUS_HI 0x10e3
+#define regCPC_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regCPF_UE_ERR_STATUS_LO 0x10e4
+#define regCPF_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regCPF_UE_ERR_STATUS_HI 0x10e5
+#define regCPF_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regCPF_CE_ERR_STATUS_LO 0x10e6
+#define regCPF_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regCPF_CE_ERR_STATUS_HI 0x10e7
+#define regCPF_CE_ERR_STATUS_HI_BASE_IDX 0
+#define regCPG_UE_ERR_STATUS_LO 0x10e8
+#define regCPG_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regCPG_UE_ERR_STATUS_HI 0x10e9
+#define regCPG_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regCPG_CE_ERR_STATUS_LO 0x10ea
+#define regCPG_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regCPG_CE_ERR_STATUS_HI 0x10eb
+#define regCPG_CE_ERR_STATUS_HI_BASE_IDX 0


// addressBlock: xcd0_gc_cppdec2
@@ -5364,6 +5536,18 @@
#define regSPI_WAVE_LIMIT_CNTL 0x2443
#define regSPI_WAVE_LIMIT_CNTL_BASE_IDX 1
+// addressBlock: xcd0_gc_gccanedec
+// base address: 0x33d00
+#define regGC_CANE_ERR_STATUS 0x2f4d
+#define regGC_CANE_ERR_STATUS_BASE_IDX 1
+#define regGC_CANE_UE_ERR_STATUS_LO 0x2f4e
+#define regGC_CANE_UE_ERR_STATUS_LO_BASE_IDX 1
+#define regGC_CANE_UE_ERR_STATUS_HI 0x2f4f
+#define regGC_CANE_UE_ERR_STATUS_HI_BASE_IDX 1
+#define regGC_CANE_CE_ERR_STATUS_LO 0x2f50
+#define regGC_CANE_CE_ERR_STATUS_LO_BASE_IDX 1
+#define regGC_CANE_CE_ERR_STATUS_HI 0x2f51
+#define regGC_CANE_CE_ERR_STATUS_HI_BASE_IDX 1
// addressBlock: xcd0_gc_perfddec
// base address: 0x34000
@@ -6583,6 +6767,10 @@
#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1
#define regRLC_CPG_STAT_INVAL 0x4d09
#define regRLC_CPG_STAT_INVAL_BASE_IDX 1
+#define regRLC_UE_ERR_STATUS_LOW 0x4d40
+#define regRLC_UE_ERR_STATUS_LOW_BASE_IDX 1
+#define regRLC_UE_ERR_STATUS_HIGH 0x4d41
+#define regRLC_UE_ERR_STATUS_HIGH_BASE_IDX 1
#define regRLC_DSM_CNTL 0x4d42
#define regRLC_DSM_CNTL_BASE_IDX 1
#define regRLC_DSM_CNTLA 0x4d43
@@ -6591,6 +6779,10 @@
#define regRLC_DSM_CNTL2_BASE_IDX 1
#define regRLC_DSM_CNTL2A 0x4d45
#define regRLC_DSM_CNTL2A_BASE_IDX 1
+#define regRLC_CE_ERR_STATUS_LOW 0x4d49
+#define regRLC_CE_ERR_STATUS_LOW_BASE_IDX 1
+#define regRLC_CE_ERR_STATUS_HIGH 0x4d4a
+#define regRLC_CE_ERR_STATUS_HIGH_BASE_IDX 1
#define regRLC_RLCV_SPARE_INT 0x4f30
#define regRLC_RLCV_SPARE_INT_BASE_IDX 1
#define regRLC_SMU_CLK_REQ 0x4f97
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
index 84a75b58347f..2bd9f3f1026f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h
@@ -4129,6 +4129,240 @@
#define SQC_DCACHE_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
#define SQC_DCACHE_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
#define SQC_DCACHE_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//SQC_UE_EDC_LO
+#define SQC_UE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define SQC_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define SQC_UE_EDC_LO__ADDRESS__SHIFT 0x2
+#define SQC_UE_EDC_LO__MEM_ID__SHIFT 0x18
+#define SQC_UE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define SQC_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define SQC_UE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SQC_UE_EDC_LO__MEM_ID_MASK 0xFF000000L
+//SQC_UE_EDC_HI
+#define SQC_UE_EDC_HI__ECC__SHIFT 0x0
+#define SQC_UE_EDC_HI__PARITY__SHIFT 0x1
+#define SQC_UE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SQC_UE_EDC_HI__ERR_INFO__SHIFT 0x3
+#define SQC_UE_EDC_HI__UE_CNT__SHIFT 0x17
+#define SQC_UE_EDC_HI__FED_CNT__SHIFT 0x1a
+#define SQC_UE_EDC_HI__ECC_MASK 0x00000001L
+#define SQC_UE_EDC_HI__PARITY_MASK 0x00000002L
+#define SQC_UE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SQC_UE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SQC_UE_EDC_HI__UE_CNT_MASK 0x03800000L
+#define SQC_UE_EDC_HI__FED_CNT_MASK 0x1C000000L
+//SQC_CE_EDC_LO
+#define SQC_CE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define SQC_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define SQC_CE_EDC_LO__ADDRESS__SHIFT 0x2
+#define SQC_CE_EDC_LO__MEM_ID__SHIFT 0x18
+#define SQC_CE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define SQC_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define SQC_CE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SQC_CE_EDC_LO__MEM_ID_MASK 0xFF000000L
+//SQC_CE_EDC_HI
+#define SQC_CE_EDC_HI__ECC__SHIFT 0x0
+#define SQC_CE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SQC_CE_EDC_HI__ERR_INFO__SHIFT 0x3
+#define SQC_CE_EDC_HI__CE_CNT__SHIFT 0x17
+#define SQC_CE_EDC_HI__POSION__SHIFT 0x1a
+#define SQC_CE_EDC_HI__ECC_MASK 0x00000001L
+#define SQC_CE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SQC_CE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SQC_CE_EDC_HI__CE_CNT_MASK 0x03800000L
+#define SQC_CE_EDC_HI__POSION_MASK 0x04000000L
+//SQ_UE_ERR_STATUS_LO
+#define SQ_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SQ_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SQ_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SQ_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SQ_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SQ_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SQ_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SQ_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SQ_UE_ERR_STATUS_HI
+#define SQ_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SQ_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define SQ_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SQ_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SQ_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define SQ_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define SQ_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define SQ_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SQ_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define SQ_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SQ_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SQ_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define SQ_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define SQ_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//SQ_CE_ERR_STATUS_LO
+#define SQ_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SQ_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SQ_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SQ_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SQ_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SQ_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SQ_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SQ_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SQ_CE_ERR_STATUS_HI
+#define SQ_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SQ_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define SQ_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SQ_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SQ_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define SQ_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define SQ_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define SQ_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SQ_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define SQ_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SQ_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SQ_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define SQ_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define SQ_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//LDS_UE_ERR_STATUS_LO
+#define LDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define LDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define LDS_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define LDS_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define LDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define LDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define LDS_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define LDS_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//LDS_UE_ERR_STATUS_HI
+#define LDS_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define LDS_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define LDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define LDS_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define LDS_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define LDS_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define LDS_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define LDS_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define LDS_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define LDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define LDS_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define LDS_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define LDS_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define LDS_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//LDS_CE_ERR_STATUS_LO
+#define LDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define LDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define LDS_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define LDS_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define LDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define LDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define LDS_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define LDS_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//LDS_CE_ERR_STATUS_HI
+#define LDS_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define LDS_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define LDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define LDS_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define LDS_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define LDS_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define LDS_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define LDS_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define LDS_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define LDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define LDS_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define LDS_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define LDS_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define LDS_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//SP0_UE_ERR_STATUS_LO
+#define SP0_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SP0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SP0_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SP0_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SP0_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SP0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SP0_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SP0_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SP0_UE_ERR_STATUS_HI
+#define SP0_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SP0_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define SP0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SP0_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SP0_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define SP0_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define SP0_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define SP0_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SP0_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define SP0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SP0_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SP0_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define SP0_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define SP0_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//SP0_CE_ERR_STATUS_LO
+#define SP0_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SP0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SP0_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SP0_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SP0_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SP0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SP0_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SP0_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SP0_CE_ERR_STATUS_HI
+#define SP0_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SP0_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define SP0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SP0_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SP0_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define SP0_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define SP0_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define SP0_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SP0_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define SP0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SP0_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SP0_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define SP0_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define SP0_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//SP1_UE_ERR_STATUS_LO
+#define SP1_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SP1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SP1_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SP1_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SP1_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SP1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SP1_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SP1_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SP1_UE_ERR_STATUS_HI
+#define SP1_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SP1_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define SP1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SP1_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SP1_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define SP1_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define SP1_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define SP1_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SP1_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define SP1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SP1_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SP1_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define SP1_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define SP1_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//SP1_CE_ERR_STATUS_LO
+#define SP1_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SP1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SP1_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SP1_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SP1_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SP1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SP1_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SP1_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SP1_CE_ERR_STATUS_HI
+#define SP1_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SP1_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define SP1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SP1_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SP1_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define SP1_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define SP1_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define SP1_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SP1_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define SP1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SP1_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SP1_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define SP1_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define SP1_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L


// addressBlock: xcd0_gc_shsdec
@@ -4235,6 +4469,54 @@
#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L
#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L
#define SPI_EDC_CNT__UNUSED_MASK 0xFFF00000L
+//SPI_UE_ERR_STATUS_LO
+#define SPI_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SPI_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SPI_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SPI_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SPI_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SPI_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SPI_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SPI_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SPI_UE_ERR_STATUS_HI
+#define SPI_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SPI_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define SPI_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SPI_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SPI_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define SPI_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define SPI_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define SPI_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SPI_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define SPI_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SPI_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SPI_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define SPI_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define SPI_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//SPI_CE_ERR_STATUS_LO
+#define SPI_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SPI_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SPI_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SPI_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SPI_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SPI_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SPI_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SPI_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SPI_CE_ERR_STATUS_HI
+#define SPI_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SPI_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define SPI_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SPI_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SPI_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define SPI_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define SPI_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define SPI_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SPI_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define SPI_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SPI_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SPI_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define SPI_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define SPI_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
//SPI_DEBUG_BUSY
#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0
#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1
@@ -4622,6 +4904,48 @@
#define TD_POWER_CNTL__MGCG_OUTPUTSTAGE_MASK 0x00000002L
#define TD_POWER_CNTL__MID0_THREAD_DATA_MASK 0x00000004L
#define TD_POWER_CNTL__MID2_ACCUM_DATA_MASK 0x00000008L
+//TD_UE_EDC_LO
+#define TD_UE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define TD_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TD_UE_EDC_LO__ADDRESS__SHIFT 0x2
+#define TD_UE_EDC_LO__MEM_ID__SHIFT 0x18
+#define TD_UE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TD_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TD_UE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TD_UE_EDC_LO__MEM_ID_MASK 0xFF000000L
+//TD_UE_EDC_HI
+#define TD_UE_EDC_HI__ECC__SHIFT 0x0
+#define TD_UE_EDC_HI__PARITY__SHIFT 0x1
+#define TD_UE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TD_UE_EDC_HI__ERR_INFO__SHIFT 0x3
+#define TD_UE_EDC_HI__UE_CNT__SHIFT 0x17
+#define TD_UE_EDC_HI__FED_CNT__SHIFT 0x1a
+#define TD_UE_EDC_HI__ECC_MASK 0x00000001L
+#define TD_UE_EDC_HI__PARITY_MASK 0x00000002L
+#define TD_UE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TD_UE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L
+#define TD_UE_EDC_HI__UE_CNT_MASK 0x03800000L
+#define TD_UE_EDC_HI__FED_CNT_MASK 0x1C000000L
+//TD_CE_EDC_LO
+#define TD_CE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define TD_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TD_CE_EDC_LO__ADDRESS__SHIFT 0x2
+#define TD_CE_EDC_LO__MEM_ID__SHIFT 0x18
+#define TD_CE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TD_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TD_CE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TD_CE_EDC_LO__MEM_ID_MASK 0xFF000000L
+//TD_CE_EDC_HI
+#define TD_CE_EDC_HI__ECC__SHIFT 0x0
+#define TD_CE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TD_CE_EDC_HI__ERR_INFO__SHIFT 0x3
+#define TD_CE_EDC_HI__CE_CNT__SHIFT 0x17
+#define TD_CE_EDC_HI__POISON__SHIFT 0x1a
+#define TD_CE_EDC_HI__ECC_MASK 0x00000001L
+#define TD_CE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TD_CE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L
+#define TD_CE_EDC_HI__CE_CNT_MASK 0x03800000L
+#define TD_CE_EDC_HI__POISON_MASK 0x04000000L
//TD_DSM_CNTL
#define TD_DSM_CNTL__TD_SS_FIFO_LO_DSM_IRRITATOR_DATA__SHIFT 0x0
#define TD_DSM_CNTL__TD_SS_FIFO_LO_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -4771,6 +5095,48 @@
#define TA_DSM_CNTL2__TA_FS_AFIFO_HI_ENABLE_ERROR_INJECT_MASK 0x000C0000L
#define TA_DSM_CNTL2__TA_FS_AFIFO_HI_SELECT_INJECT_DELAY_MASK 0x00100000L
#define TA_DSM_CNTL2__TA_INJECT_DELAY_MASK 0xFC000000L
+//TA_UE_EDC_LO
+#define TA_UE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define TA_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TA_UE_EDC_LO__ADDRESS__SHIFT 0x2
+#define TA_UE_EDC_LO__MEM_ID__SHIFT 0x18
+#define TA_UE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TA_UE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TA_UE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TA_UE_EDC_LO__MEM_ID_MASK 0xFF000000L
+//TA_UE_EDC_HI
+#define TA_UE_EDC_HI__ECC__SHIFT 0x0
+#define TA_UE_EDC_HI__PARITY__SHIFT 0x1
+#define TA_UE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TA_UE_EDC_HI__ERR_INFO__SHIFT 0x3
+#define TA_UE_EDC_HI__UE_CNT__SHIFT 0x17
+#define TA_UE_EDC_HI__FED_CNT__SHIFT 0x1a
+#define TA_UE_EDC_HI__ECC_MASK 0x00000001L
+#define TA_UE_EDC_HI__PARITY_MASK 0x00000002L
+#define TA_UE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TA_UE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L
+#define TA_UE_EDC_HI__UE_CNT_MASK 0x03800000L
+#define TA_UE_EDC_HI__FED_CNT_MASK 0x1C000000L
+//TA_CE_EDC_LO
+#define TA_CE_EDC_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define TA_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TA_CE_EDC_LO__ADDRESS__SHIFT 0x2
+#define TA_CE_EDC_LO__MEM_ID__SHIFT 0x18
+#define TA_CE_EDC_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TA_CE_EDC_LO__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TA_CE_EDC_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TA_CE_EDC_LO__MEM_ID_MASK 0xFF000000L
+//TA_CE_EDC_HI
+#define TA_CE_EDC_HI__ECC__SHIFT 0x0
+#define TA_CE_EDC_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TA_CE_EDC_HI__ERR_INFO__SHIFT 0x3
+#define TA_CE_EDC_HI__CE_CNT__SHIFT 0x17
+#define TA_CE_EDC_HI__POISON__SHIFT 0x1a
+#define TA_CE_EDC_HI__ECC_MASK 0x00000001L
+#define TA_CE_EDC_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TA_CE_EDC_HI__ERR_INFO_MASK 0x007FFFF8L
+#define TA_CE_EDC_HI__CE_CNT_MASK 0x03800000L
+#define TA_CE_EDC_HI__POISON_MASK 0x04000000L


// addressBlock: xcd0_gc_gdsdec
@@ -5015,6 +5381,54 @@
#define GDS_WD_GDS_CSB__UNUSED__SHIFT 0xd
#define GDS_WD_GDS_CSB__COUNTER_MASK 0x00001FFFL
#define GDS_WD_GDS_CSB__UNUSED_MASK 0xFFFFE000L
+//GDS_UE_ERR_STATUS_LO
+#define GDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define GDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define GDS_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define GDS_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define GDS_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define GDS_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define GDS_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define GDS_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//GDS_UE_ERR_STATUS_HI
+#define GDS_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define GDS_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define GDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define GDS_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define GDS_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define GDS_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define GDS_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define GDS_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define GDS_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define GDS_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define GDS_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define GDS_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define GDS_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define GDS_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//GDS_CE_ERR_STATUS_LO
+#define GDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define GDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define GDS_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define GDS_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define GDS_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define GDS_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define GDS_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define GDS_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//GDS_CE_ERR_STATUS_HI
+#define GDS_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define GDS_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define GDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define GDS_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define GDS_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define GDS_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define GDS_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define GDS_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define GDS_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define GDS_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define GDS_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define GDS_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define GDS_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define GDS_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L


// addressBlock: xcd0_gc_rbdec
@@ -7370,6 +7784,30 @@
#define GCEA_MAM_CTRL2__ARAM_FLUSH_NOALLOC_MASK 0x00000040L
#define GCEA_MAM_CTRL2__RESERVED_FIELD_MASK 0x00FFFF80L
#define GCEA_MAM_CTRL2__ADDR_HI_MASK 0xFF000000L
+//GCEA_UE_ERR_STATUS_LO
+#define GCEA_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define GCEA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define GCEA_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define GCEA_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define GCEA_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define GCEA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define GCEA_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define GCEA_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//GCEA_UE_ERR_STATUS_HI
+#define GCEA_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define GCEA_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define GCEA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define GCEA_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define GCEA_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define GCEA_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define GCEA_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d
+#define GCEA_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define GCEA_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define GCEA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define GCEA_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define GCEA_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define GCEA_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define GCEA_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L
//GCEA_DSM_CNTL
#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -7745,6 +8183,30 @@
#define GCEA_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_REQ_CREDITS_RELEASED_MASK 0x007F0000L
#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x3F800000L
+//GCEA_CE_ERR_STATUS_LO
+#define GCEA_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define GCEA_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define GCEA_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define GCEA_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define GCEA_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define GCEA_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define GCEA_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define GCEA_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//GCEA_CE_ERR_STATUS_HI
+#define GCEA_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1
+#define GCEA_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define GCEA_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define GCEA_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define GCEA_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b
+#define GCEA_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L
+#define GCEA_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define GCEA_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define GCEA_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define GCEA_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define GCEA_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L
//GCEA_SDP_ENABLE
#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
@@ -8440,6 +8902,54 @@
//ATC_L2_MM_GROUP_RT_CLASSES
#define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0
#define ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL
+//ATC_L2_UE_ERR_STATUS_LO
+#define ATC_L2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define ATC_L2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define ATC_L2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define ATC_L2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define ATC_L2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//ATC_L2_UE_ERR_STATUS_HI
+#define ATC_L2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define ATC_L2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define ATC_L2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define ATC_L2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define ATC_L2_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define ATC_L2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define ATC_L2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define ATC_L2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define ATC_L2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define ATC_L2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define ATC_L2_UE_ERR_STATUS_HI__RESERVED_MASK 0x60000000L
+//ATC_L2_CE_ERR_STATUS_LO
+#define ATC_L2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define ATC_L2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define ATC_L2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define ATC_L2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define ATC_L2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//ATC_L2_CE_ERR_STATUS_HI
+#define ATC_L2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define ATC_L2_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define ATC_L2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define ATC_L2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define ATC_L2_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define ATC_L2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define ATC_L2_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define ATC_L2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define ATC_L2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define ATC_L2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define ATC_L2_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L


// addressBlock: xcd0_gc_utcl2_vml2pfdec
@@ -8888,6 +9398,150 @@
#define UTCL2_EDC_CONFIG__DIS_EDC__SHIFT 0x1
#define UTCL2_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
#define UTCL2_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//VML2_UE_ERR_STATUS_LO
+#define VML2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define VML2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define VML2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define VML2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define VML2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define VML2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define VML2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define VML2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//VML2_WALKER_UE_ERR_STATUS_LO
+#define VML2_WALKER_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define VML2_WALKER_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define VML2_WALKER_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define VML2_WALKER_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define VML2_WALKER_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//UTCL2_UE_ERR_STATUS_LO
+#define UTCL2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define UTCL2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define UTCL2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define UTCL2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define UTCL2_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define UTCL2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define UTCL2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define UTCL2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//VML2_UE_ERR_STATUS_HI
+#define VML2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define VML2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define VML2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define VML2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define VML2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define VML2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define VML2_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define VML2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define VML2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define VML2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define VML2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define VML2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define VML2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define VML2_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//VML2_WALKER_UE_ERR_STATUS_HI
+#define VML2_WALKER_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define VML2_WALKER_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define VML2_WALKER_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define VML2_WALKER_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define VML2_WALKER_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define VML2_WALKER_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define VML2_WALKER_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define VML2_WALKER_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define VML2_WALKER_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define VML2_WALKER_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define VML2_WALKER_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//UTCL2_UE_ERR_STATUS_HI
+#define UTCL2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define UTCL2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define UTCL2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define UTCL2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define UTCL2_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define UTCL2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define UTCL2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define UTCL2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define UTCL2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define UTCL2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define UTCL2_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//VML2_CE_ERR_STATUS_LO
+#define VML2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define VML2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define VML2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define VML2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define VML2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define VML2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define VML2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define VML2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//VML2_WALKER_CE_ERR_STATUS_LO
+#define VML2_WALKER_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define VML2_WALKER_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define VML2_WALKER_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define VML2_WALKER_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define VML2_WALKER_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//UTCL2_CE_ERR_STATUS_LO
+#define UTCL2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define UTCL2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define UTCL2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define UTCL2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define UTCL2_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define UTCL2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define UTCL2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define UTCL2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//VML2_CE_ERR_STATUS_HI
+#define VML2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define VML2_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define VML2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define VML2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define VML2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define VML2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define VML2_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define VML2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define VML2_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define VML2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define VML2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define VML2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define VML2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define VML2_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//VML2_WALKER_CE_ERR_STATUS_HI
+#define VML2_WALKER_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define VML2_WALKER_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define VML2_WALKER_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define VML2_WALKER_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define VML2_WALKER_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define VML2_WALKER_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define VML2_WALKER_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define VML2_WALKER_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define VML2_WALKER_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define VML2_WALKER_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define VML2_WALKER_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//UTCL2_CE_ERR_STATUS_HI
+#define UTCL2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define UTCL2_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define UTCL2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define UTCL2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define UTCL2_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define UTCL2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define UTCL2_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define UTCL2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define UTCL2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define UTCL2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define UTCL2_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L


// addressBlock: xcd0_gc_utcl2_vml2vcdec
@@ -11139,6 +11793,98 @@
//TC_CFG_L2_VOLATILE
#define TC_CFG_L2_VOLATILE__VOL__SHIFT 0x0
#define TC_CFG_L2_VOLATILE__VOL_MASK 0x0000000FL
+//TCP_UE_EDC_HI_REG
+#define TCP_UE_EDC_HI_REG__ECC__SHIFT 0x0
+#define TCP_UE_EDC_HI_REG__PARITY__SHIFT 0x1
+#define TCP_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCP_UE_EDC_HI_REG__ERR_INFO__SHIFT 0x3
+#define TCP_UE_EDC_HI_REG__UE_CNT__SHIFT 0x17
+#define TCP_UE_EDC_HI_REG__FED_CNT__SHIFT 0x1a
+#define TCP_UE_EDC_HI_REG__RESERVED__SHIFT 0x1d
+#define TCP_UE_EDC_HI_REG__ECC_MASK 0x00000001L
+#define TCP_UE_EDC_HI_REG__PARITY_MASK 0x00000002L
+#define TCP_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCP_UE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L
+#define TCP_UE_EDC_HI_REG__UE_CNT_MASK 0x03800000L
+#define TCP_UE_EDC_HI_REG__FED_CNT_MASK 0x1C000000L
+#define TCP_UE_EDC_HI_REG__RESERVED_MASK 0xE0000000L
+//TCP_UE_EDC_LO_REG
+#define TCP_UE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0
+#define TCP_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TCP_UE_EDC_LO_REG__ADDRESS__SHIFT 0x2
+#define TCP_UE_EDC_LO_REG__MEM_ID__SHIFT 0x18
+#define TCP_UE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCP_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TCP_UE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL
+#define TCP_UE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L
+//TCP_CE_EDC_HI_REG
+#define TCP_CE_EDC_HI_REG__ECC__SHIFT 0x0
+#define TCP_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCP_CE_EDC_HI_REG__ERR_INFO__SHIFT 0x3
+#define TCP_CE_EDC_HI_REG__CE_CNT__SHIFT 0x17
+#define TCP_CE_EDC_HI_REG__POISON__SHIFT 0x1a
+#define TCP_CE_EDC_HI_REG__RESERVED__SHIFT 0x1b
+#define TCP_CE_EDC_HI_REG__ECC_MASK 0x00000001L
+#define TCP_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCP_CE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L
+#define TCP_CE_EDC_HI_REG__CE_CNT_MASK 0x03800000L
+#define TCP_CE_EDC_HI_REG__POISON_MASK 0x04000000L
+#define TCP_CE_EDC_HI_REG__RESERVED_MASK 0xF8000000L
+//TCP_CE_EDC_LO_REG
+#define TCP_CE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0
+#define TCP_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TCP_CE_EDC_LO_REG__ADDRESS__SHIFT 0x2
+#define TCP_CE_EDC_LO_REG__MEM_ID__SHIFT 0x18
+#define TCP_CE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCP_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TCP_CE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL
+#define TCP_CE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L
+//TCI_UE_EDC_HI_REG
+#define TCI_UE_EDC_HI_REG__ECC__SHIFT 0x0
+#define TCI_UE_EDC_HI_REG__PARITY__SHIFT 0x1
+#define TCI_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCI_UE_EDC_HI_REG__ERR_INFO__SHIFT 0x3
+#define TCI_UE_EDC_HI_REG__UE_CNT__SHIFT 0x17
+#define TCI_UE_EDC_HI_REG__FED_CNT__SHIFT 0x1a
+#define TCI_UE_EDC_HI_REG__RESERVED__SHIFT 0x1d
+#define TCI_UE_EDC_HI_REG__ECC_MASK 0x00000001L
+#define TCI_UE_EDC_HI_REG__PARITY_MASK 0x00000002L
+#define TCI_UE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCI_UE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L
+#define TCI_UE_EDC_HI_REG__UE_CNT_MASK 0x03800000L
+#define TCI_UE_EDC_HI_REG__FED_CNT_MASK 0x1C000000L
+#define TCI_UE_EDC_HI_REG__RESERVED_MASK 0xE0000000L
+//TCI_UE_EDC_LO_REG
+#define TCI_UE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0
+#define TCI_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TCI_UE_EDC_LO_REG__ADDRESS__SHIFT 0x2
+#define TCI_UE_EDC_LO_REG__MEM_ID__SHIFT 0x18
+#define TCI_UE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCI_UE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TCI_UE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL
+#define TCI_UE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L
+//TCI_CE_EDC_HI_REG
+#define TCI_CE_EDC_HI_REG__ECC__SHIFT 0x0
+#define TCI_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCI_CE_EDC_HI_REG__ERR_INFO__SHIFT 0x3
+#define TCI_CE_EDC_HI_REG__CE_CNT__SHIFT 0x17
+#define TCI_CE_EDC_HI_REG__POISON__SHIFT 0x1a
+#define TCI_CE_EDC_HI_REG__RESERVED__SHIFT 0x1b
+#define TCI_CE_EDC_HI_REG__ECC_MASK 0x00000001L
+#define TCI_CE_EDC_HI_REG__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCI_CE_EDC_HI_REG__ERR_INFO_MASK 0x007FFFF8L
+#define TCI_CE_EDC_HI_REG__CE_CNT_MASK 0x03800000L
+#define TCI_CE_EDC_HI_REG__POISON_MASK 0x04000000L
+#define TCI_CE_EDC_HI_REG__RESERVED_MASK 0xF8000000L
+//TCI_CE_EDC_LO_REG
+#define TCI_CE_EDC_LO_REG__STATUS_VALID_FLAG__SHIFT 0x0
+#define TCI_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG__SHIFT 0x1
+#define TCI_CE_EDC_LO_REG__ADDRESS__SHIFT 0x2
+#define TCI_CE_EDC_LO_REG__MEM_ID__SHIFT 0x18
+#define TCI_CE_EDC_LO_REG__STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCI_CE_EDC_LO_REG__ADDRESS_INFO_VALID_FLAG_MASK 0x00000002L
+#define TCI_CE_EDC_LO_REG__ADDRESS_MASK 0x00FFFFFCL
+#define TCI_CE_EDC_LO_REG__MEM_ID_MASK 0xFF000000L
//TCI_MISC
#define TCI_MISC__FGCG_REPEATER_DISABLE__SHIFT 0x0
#define TCI_MISC__LEGACY_MGCG_DISABLE__SHIFT 0x1
@@ -11560,6 +12306,112 @@
#define TCX_DSM_CNTL2__SED_ENABLE_ERROR_INJECT_MASK 0x00000003L
#define TCX_DSM_CNTL2__SED_SELECT_INJECT_DELAY_MASK 0x00000004L
#define TCX_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//TCA_UE_ERR_STATUS_LO
+#define TCA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define TCA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define TCA_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define TCA_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define TCA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define TCA_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TCA_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//TCA_UE_ERR_STATUS_HI
+#define TCA_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define TCA_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define TCA_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCA_UE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3
+#define TCA_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define TCA_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define TCA_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define TCA_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define TCA_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCA_UE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L
+#define TCA_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define TCA_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+//TCX_UE_ERR_STATUS_LO
+#define TCX_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define TCX_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define TCX_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define TCX_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define TCX_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCX_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define TCX_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TCX_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//TCX_UE_ERR_STATUS_HI
+#define TCX_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define TCX_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define TCX_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCX_UE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3
+#define TCX_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define TCX_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define TCX_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define TCX_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define TCX_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCX_UE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L
+#define TCX_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define TCX_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+//TCX_CE_ERR_STATUS_LO
+#define TCX_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define TCX_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define TCX_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define TCX_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define TCX_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCX_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define TCX_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TCX_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//TCX_CE_ERR_STATUS_HI
+#define TCX_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define TCX_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCX_CE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3
+#define TCX_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define TCX_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define TCX_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define TCX_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCX_CE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L
+#define TCX_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define TCX_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+//TCC_UE_ERR_STATUS_LO
+#define TCC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define TCC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define TCC_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define TCC_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define TCC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define TCC_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TCC_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//TCC_UE_ERR_STATUS_HI
+#define TCC_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define TCC_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define TCC_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCC_UE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3
+#define TCC_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define TCC_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define TCC_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define TCC_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define TCC_UE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCC_UE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L
+#define TCC_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define TCC_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+//TCC_CE_ERR_STATUS_LO
+#define TCC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define TCC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define TCC_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define TCC_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define TCC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define TCC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define TCC_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define TCC_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//TCC_CE_ERR_STATUS_HI
+#define TCC_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define TCC_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG__SHIFT 0x2
+#define TCC_CE_ERR_STATUS_HI__ERROR_INFO__SHIFT 0x3
+#define TCC_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define TCC_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define TCC_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define TCC_CE_ERR_STATUS_HI__ERROR_INFO_VALID_FLAG_MASK 0x00000004L
+#define TCC_CE_ERR_STATUS_HI__ERROR_INFO_MASK 0x007FFFF8L
+#define TCC_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define TCC_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
// addressBlock: xcd0_gc_shdec
@@ -14384,6 +15236,150 @@
#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
+//CPC_UE_ERR_STATUS_LO
+#define CPC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define CPC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define CPC_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define CPC_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define CPC_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define CPC_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define CPC_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define CPC_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//CPC_UE_ERR_STATUS_HI
+#define CPC_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define CPC_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define CPC_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define CPC_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define CPC_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define CPC_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define CPC_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define CPC_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define CPC_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define CPC_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define CPC_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define CPC_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define CPC_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define CPC_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//CPC_CE_ERR_STATUS_LO
+#define CPC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define CPC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define CPC_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define CPC_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define CPC_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define CPC_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define CPC_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define CPC_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//CPC_CE_ERR_STATUS_HI
+#define CPC_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define CPC_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define CPC_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define CPC_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define CPC_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define CPC_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define CPC_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define CPC_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define CPC_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define CPC_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define CPC_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define CPC_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define CPC_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define CPC_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//CPF_UE_ERR_STATUS_LO
+#define CPF_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define CPF_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define CPF_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define CPF_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define CPF_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define CPF_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define CPF_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define CPF_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//CPF_UE_ERR_STATUS_HI
+#define CPF_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define CPF_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define CPF_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define CPF_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define CPF_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define CPF_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define CPF_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define CPF_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define CPF_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define CPF_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define CPF_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define CPF_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define CPF_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define CPF_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//CPF_CE_ERR_STATUS_LO
+#define CPF_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define CPF_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define CPF_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define CPF_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define CPF_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define CPF_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define CPF_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define CPF_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//CPF_CE_ERR_STATUS_HI
+#define CPF_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define CPF_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define CPF_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define CPF_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define CPF_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define CPF_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define CPF_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define CPF_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define CPF_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define CPF_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define CPF_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define CPF_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define CPF_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define CPF_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
+//CPG_UE_ERR_STATUS_LO
+#define CPG_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define CPG_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define CPG_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define CPG_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define CPG_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define CPG_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define CPG_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define CPG_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//CPG_UE_ERR_STATUS_HI
+#define CPG_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define CPG_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define CPG_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define CPG_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define CPG_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define CPG_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define CPG_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define CPG_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define CPG_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define CPG_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define CPG_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define CPG_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define CPG_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define CPG_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
+//CPG_CE_ERR_STATUS_LO
+#define CPG_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define CPG_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define CPG_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define CPG_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define CPG_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define CPG_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define CPG_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define CPG_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//CPG_CE_ERR_STATUS_HI
+#define CPG_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define CPG_CE_ERR_STATUS_HI__OTHER__SHIFT 0x1
+#define CPG_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define CPG_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define CPG_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define CPG_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define CPG_CE_ERR_STATUS_HI__RESERVED__SHIFT 0x1b
+#define CPG_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define CPG_CE_ERR_STATUS_HI__OTHER_MASK 0x00000002L
+#define CPG_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define CPG_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define CPG_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define CPG_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define CPG_CE_ERR_STATUS_HI__RESERVED_MASK 0xF8000000L
// addressBlock: xcd0_gc_cppdec2
@@ -22764,6 +23760,74 @@
#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L
#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L
+// addressBlock: xcd0_gc_gccanedec
+//GC_CANE_ERR_STATUS
+#define GC_CANE_ERR_STATUS__SDPM_RDRSP_STATUS__SHIFT 0x0
+#define GC_CANE_ERR_STATUS__SDPM_WRRSP_STATUS__SHIFT 0x4
+#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS__SHIFT 0x8
+#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GC_CANE_ERR_STATUS__SDPS_DAT_ERROR__SHIFT 0xb
+#define GC_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR__SHIFT 0xc
+#define GC_CANE_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xd
+#define GC_CANE_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xe
+#define GC_CANE_ERR_STATUS__BUSY_ON_UER_ERROR__SHIFT 0xf
+#define GC_CANE_ERR_STATUS__FUE_FLAG__SHIFT 0x10
+#define GC_CANE_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0x11
+#define GC_CANE_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x12
+#define GC_CANE_ERR_STATUS__SDPM_RDRSP_STATUS_MASK 0x0000000FL
+#define GC_CANE_ERR_STATUS__SDPM_WRRSP_STATUS_MASK 0x000000F0L
+#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GC_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GC_CANE_ERR_STATUS__SDPS_DAT_ERROR_MASK 0x00000800L
+#define GC_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR_MASK 0x00001000L
+#define GC_CANE_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00002000L
+#define GC_CANE_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00004000L
+#define GC_CANE_ERR_STATUS__BUSY_ON_UER_ERROR_MASK 0x00008000L
+#define GC_CANE_ERR_STATUS__FUE_FLAG_MASK 0x00010000L
+#define GC_CANE_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00020000L
+#define GC_CANE_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00040000L
+//GC_CANE_UE_ERR_STATUS_LO
+#define GC_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define GC_CANE_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define GC_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define GC_CANE_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define GC_CANE_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//GC_CANE_UE_ERR_STATUS_HI
+#define GC_CANE_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define GC_CANE_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define GC_CANE_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define GC_CANE_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define GC_CANE_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define GC_CANE_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define GC_CANE_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define GC_CANE_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define GC_CANE_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+//GC_CANE_CE_ERR_STATUS_LO
+#define GC_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define GC_CANE_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define GC_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define GC_CANE_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define GC_CANE_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//GC_CANE_CE_ERR_STATUS_HI
+#define GC_CANE_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define GC_CANE_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define GC_CANE_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define GC_CANE_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define GC_CANE_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define GC_CANE_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define GC_CANE_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
// addressBlock: xcd0_gc_perfddec
//CPG_PERFCOUNTER1_LO
@@ -26471,6 +27535,30 @@
//RLC_CPG_STAT_INVAL
#define RLC_CPG_STAT_INVAL__CPG_stat_inval__SHIFT 0x0
#define RLC_CPG_STAT_INVAL__CPG_stat_inval_MASK 0x00000001L
+//RLC_UE_ERR_STATUS_LOW
+#define RLC_UE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define RLC_UE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define RLC_UE_ERR_STATUS_LOW__ADDRESS__SHIFT 0x2
+#define RLC_UE_ERR_STATUS_LOW__MEMORY_ID__SHIFT 0x18
+#define RLC_UE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define RLC_UE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define RLC_UE_ERR_STATUS_LOW__ADDRESS_MASK 0x00FFFFFCL
+#define RLC_UE_ERR_STATUS_LOW__MEMORY_ID_MASK 0xFF000000L
+//RLC_UE_ERR_STATUS_HIGH
+#define RLC_UE_ERR_STATUS_HIGH__ECC__SHIFT 0x0
+#define RLC_UE_ERR_STATUS_HIGH__PARITY__SHIFT 0x1
+#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO__SHIFT 0x3
+#define RLC_UE_ERR_STATUS_HIGH__UE_CNT__SHIFT 0x17
+#define RLC_UE_ERR_STATUS_HIGH__FED_CNT__SHIFT 0x1a
+#define RLC_UE_ERR_STATUS_HIGH__RESERVED__SHIFT 0x1d
+#define RLC_UE_ERR_STATUS_HIGH__ECC_MASK 0x00000001L
+#define RLC_UE_ERR_STATUS_HIGH__PARITY_MASK 0x00000002L
+#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define RLC_UE_ERR_STATUS_HIGH__ERR_INFO_MASK 0x007FFFF8L
+#define RLC_UE_ERR_STATUS_HIGH__UE_CNT_MASK 0x03800000L
+#define RLC_UE_ERR_STATUS_HIGH__FED_CNT_MASK 0x1C000000L
+#define RLC_UE_ERR_STATUS_HIGH__RESERVED_MASK 0xE0000000L
//RLC_DSM_CNTL
#define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_DATA_SEL__SHIFT 0x0
#define RLC_DSM_CNTL__RLCG_INSTR_RAM_IRRITATOR_SINGLE_WRITE__SHIFT 0x2
@@ -26573,6 +27661,30 @@
#define RLC_DSM_CNTL2A__RLC_SPM_SE2_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
#define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
#define RLC_DSM_CNTL2A__RLC_SPM_SE3_SCRATCH_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+//RLC_CE_ERR_STATUS_LOW
+#define RLC_CE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define RLC_CE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define RLC_CE_ERR_STATUS_LOW__ADDRESS__SHIFT 0x2
+#define RLC_CE_ERR_STATUS_LOW__MEMORY_ID__SHIFT 0x18
+#define RLC_CE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define RLC_CE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define RLC_CE_ERR_STATUS_LOW__ADDRESS_MASK 0x00FFFFFCL
+#define RLC_CE_ERR_STATUS_LOW__MEMORY_ID_MASK 0xFF000000L
+//RLC_CE_ERR_STATUS_HIGH
+#define RLC_CE_ERR_STATUS_HIGH__ECC__SHIFT 0x0
+#define RLC_CE_ERR_STATUS_HIGH__OTHER__SHIFT 0x1
+#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO__SHIFT 0x3
+#define RLC_CE_ERR_STATUS_HIGH__CE_CNT__SHIFT 0x17
+#define RLC_CE_ERR_STATUS_HIGH__POISON__SHIFT 0x1a
+#define RLC_CE_ERR_STATUS_HIGH__RESERVED__SHIFT 0x1b
+#define RLC_CE_ERR_STATUS_HIGH__ECC_MASK 0x00000001L
+#define RLC_CE_ERR_STATUS_HIGH__OTHER_MASK 0x00000002L
+#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define RLC_CE_ERR_STATUS_HIGH__ERR_INFO_MASK 0x007FFFF8L
+#define RLC_CE_ERR_STATUS_HIGH__CE_CNT_MASK 0x03800000L
+#define RLC_CE_ERR_STATUS_HIGH__POISON_MASK 0x04000000L
+#define RLC_CE_ERR_STATUS_HIGH__RESERVED_MASK 0xF8000000L
//RLC_RLCV_SPARE_INT
#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
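The *_UE_ERR_STATUS / *_CE_ERR_STATUS registers added in this header all follow the same LO/HI layout: the LO word carries valid flags, a failing address and a memory-instance ID, while the HI word carries error-type bits and 3-bit UE/FED (or CE/poison) counters. Below is a minimal decode sketch using only the __SHIFT/_MASK macros defined above; the raw values lo/hi, the helper name and the printf reporting are illustrative only, and it assumes the generated sh_mask header is in scope (in-tree amdgpu performs the same extraction through its REG_GET_FIELD-style helpers).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: decode one RLC UE error-status LO/HI register pair. */
static void decode_rlc_ue_status(uint32_t lo, uint32_t hi)
{
	if (!(lo & RLC_UE_ERR_STATUS_LOW__ERR_STATUS_VALID_FLAG_MASK))
		return;	/* no error latched */

	if (lo & RLC_UE_ERR_STATUS_LOW__ADDRESS_VALID_FLAG_MASK) {
		uint32_t addr = (lo & RLC_UE_ERR_STATUS_LOW__ADDRESS_MASK) >>
				RLC_UE_ERR_STATUS_LOW__ADDRESS__SHIFT;
		uint32_t mem = (lo & RLC_UE_ERR_STATUS_LOW__MEMORY_ID_MASK) >>
			       RLC_UE_ERR_STATUS_LOW__MEMORY_ID__SHIFT;
		printf("RLC UE: address 0x%06x, memory id %u\n", addr, mem);
	}

	/* HI word: error-type flags and 3-bit error counters. */
	printf("RLC UE: ue_cnt=%u fed_cnt=%u parity=%u\n",
	       (hi & RLC_UE_ERR_STATUS_HIGH__UE_CNT_MASK) >>
			RLC_UE_ERR_STATUS_HIGH__UE_CNT__SHIFT,
	       (hi & RLC_UE_ERR_STATUS_HIGH__FED_CNT_MASK) >>
			RLC_UE_ERR_STATUS_HIGH__FED_CNT__SHIFT,
	       (hi & RLC_UE_ERR_STATUS_HIGH__PARITY_MASK) >>
			RLC_UE_ERR_STATUS_HIGH__PARITY__SHIFT);
}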
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h
index 8bcc81f2dfc0..879ee9de3ff3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h
@@ -1491,6 +1491,10 @@
#define regMMEA0_PERFCOUNTER1_CFG_BASE_IDX 0
#define regMMEA0_PERFCOUNTER_RSLT_CNTL 0x0400
#define regMMEA0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA0_UE_ERR_STATUS_LO 0x0406
+#define regMMEA0_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regMMEA0_UE_ERR_STATUS_HI 0x0407
+#define regMMEA0_UE_ERR_STATUS_HI_BASE_IDX 0
#define regMMEA0_DSM_CNTL 0x0408
#define regMMEA0_DSM_CNTL_BASE_IDX 0
#define regMMEA0_DSM_CNTLA 0x0409
@@ -1511,8 +1515,12 @@
#define regMMEA0_ERR_STATUS_BASE_IDX 0
#define regMMEA0_MISC2 0x0412
#define regMMEA0_MISC2_BASE_IDX 0
+#define regMMEA0_CE_ERR_STATUS_LO 0x0414
+#define regMMEA0_CE_ERR_STATUS_LO_BASE_IDX 0
#define regMMEA0_MISC_AON 0x0415
#define regMMEA0_MISC_AON_BASE_IDX 0
+#define regMMEA0_CE_ERR_STATUS_HI 0x0416
+#define regMMEA0_CE_ERR_STATUS_HI_BASE_IDX 0
// addressBlock: aid_mmhub_ea_mmeadec1
@@ -1709,6 +1717,10 @@
#define regMMEA1_PERFCOUNTER1_CFG_BASE_IDX 0
#define regMMEA1_PERFCOUNTER_RSLT_CNTL 0x0540
#define regMMEA1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA1_UE_ERR_STATUS_LO 0x0546
+#define regMMEA1_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regMMEA1_UE_ERR_STATUS_HI 0x0547
+#define regMMEA1_UE_ERR_STATUS_HI_BASE_IDX 0
#define regMMEA1_DSM_CNTL 0x0548
#define regMMEA1_DSM_CNTL_BASE_IDX 0
#define regMMEA1_DSM_CNTLA 0x0549
@@ -1729,8 +1741,12 @@
#define regMMEA1_ERR_STATUS_BASE_IDX 0
#define regMMEA1_MISC2 0x0552
#define regMMEA1_MISC2_BASE_IDX 0
+#define regMMEA1_CE_ERR_STATUS_LO 0x0554
+#define regMMEA1_CE_ERR_STATUS_LO_BASE_IDX 0
#define regMMEA1_MISC_AON 0x0555
#define regMMEA1_MISC_AON_BASE_IDX 0
+#define regMMEA1_CE_ERR_STATUS_HI 0x0556
+#define regMMEA1_CE_ERR_STATUS_HI_BASE_IDX 0
// addressBlock: aid_mmhub_ea_mmeadec2
@@ -1927,6 +1943,10 @@
#define regMMEA2_PERFCOUNTER1_CFG_BASE_IDX 0
#define regMMEA2_PERFCOUNTER_RSLT_CNTL 0x0680
#define regMMEA2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA2_UE_ERR_STATUS_LO 0x0686
+#define regMMEA2_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regMMEA2_UE_ERR_STATUS_HI 0x0687
+#define regMMEA2_UE_ERR_STATUS_HI_BASE_IDX 0
#define regMMEA2_DSM_CNTL 0x0688
#define regMMEA2_DSM_CNTL_BASE_IDX 0
#define regMMEA2_DSM_CNTLA 0x0689
@@ -1947,8 +1967,12 @@
#define regMMEA2_ERR_STATUS_BASE_IDX 0
#define regMMEA2_MISC2 0x0692
#define regMMEA2_MISC2_BASE_IDX 0
+#define regMMEA2_CE_ERR_STATUS_LO 0x0694
+#define regMMEA2_CE_ERR_STATUS_LO_BASE_IDX 0
#define regMMEA2_MISC_AON 0x0695
#define regMMEA2_MISC_AON_BASE_IDX 0
+#define regMMEA2_CE_ERR_STATUS_HI 0x0696
+#define regMMEA2_CE_ERR_STATUS_HI_BASE_IDX 0
// addressBlock: aid_mmhub_ea_mmeadec3
@@ -2145,6 +2169,10 @@
#define regMMEA3_PERFCOUNTER1_CFG_BASE_IDX 0
#define regMMEA3_PERFCOUNTER_RSLT_CNTL 0x07c0
#define regMMEA3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA3_UE_ERR_STATUS_LO 0x07c6
+#define regMMEA3_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regMMEA3_UE_ERR_STATUS_HI 0x07c7
+#define regMMEA3_UE_ERR_STATUS_HI_BASE_IDX 0
#define regMMEA3_DSM_CNTL 0x07c8
#define regMMEA3_DSM_CNTL_BASE_IDX 0
#define regMMEA3_DSM_CNTLA 0x07c9
@@ -2165,9 +2193,12 @@
#define regMMEA3_ERR_STATUS_BASE_IDX 0
#define regMMEA3_MISC2 0x07d2
#define regMMEA3_MISC2_BASE_IDX 0
+#define regMMEA3_CE_ERR_STATUS_LO 0x07d4
+#define regMMEA3_CE_ERR_STATUS_LO_BASE_IDX 0
#define regMMEA3_MISC_AON 0x07d5
#define regMMEA3_MISC_AON_BASE_IDX 0
-
+#define regMMEA3_CE_ERR_STATUS_HI 0x07d6
+#define regMMEA3_CE_ERR_STATUS_HI_BASE_IDX 0
// addressBlock: aid_mmhub_ea_mmeadec4
// base address: 0x62000
@@ -2363,6 +2394,10 @@
#define regMMEA4_PERFCOUNTER1_CFG_BASE_IDX 0
#define regMMEA4_PERFCOUNTER_RSLT_CNTL 0x0900
#define regMMEA4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 0
+#define regMMEA4_UE_ERR_STATUS_LO 0x0906
+#define regMMEA4_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regMMEA4_UE_ERR_STATUS_HI 0x0907
+#define regMMEA4_UE_ERR_STATUS_HI_BASE_IDX 0
#define regMMEA4_DSM_CNTL 0x0908
#define regMMEA4_DSM_CNTL_BASE_IDX 0
#define regMMEA4_DSM_CNTLA 0x0909
@@ -2383,9 +2418,12 @@
#define regMMEA4_ERR_STATUS_BASE_IDX 0
#define regMMEA4_MISC2 0x0912
#define regMMEA4_MISC2_BASE_IDX 0
+#define regMMEA4_CE_ERR_STATUS_LO 0x0914
+#define regMMEA4_CE_ERR_STATUS_LO_BASE_IDX 0
#define regMMEA4_MISC_AON 0x0915
#define regMMEA4_MISC_AON_BASE_IDX 0
-
+#define regMMEA4_CE_ERR_STATUS_HI 0x0916
+#define regMMEA4_CE_ERR_STATUS_HI_BASE_IDX 0
// addressBlock: aid_mmhub_pctldec0
// base address: 0x62a00
@@ -3310,5 +3348,19 @@
#define regL2TLB_PERFCOUNTER_HI 0x0d2d
#define regL2TLB_PERFCOUNTER_HI_BASE_IDX 0
+// addressBlock: aid_mmhub_mm_cane_mmcanedec
+// base address: 0x635f0
+#define regMM_CANE_ICG_CTRL 0x0d8a
+#define regMM_CANE_ICG_CTRL_BASE_IDX 0
+#define regMM_CANE_ERR_STATUS 0x0d8c
+#define regMM_CANE_ERR_STATUS_BASE_IDX 0
+#define regMM_CANE_UE_ERR_STATUS_LO 0x0d8d
+#define regMM_CANE_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regMM_CANE_UE_ERR_STATUS_HI 0x0d8e
+#define regMM_CANE_UE_ERR_STATUS_HI_BASE_IDX 0
+#define regMM_CANE_CE_ERR_STATUS_LO 0x0d8f
+#define regMM_CANE_CE_ERR_STATUS_LO_BASE_IDX 0
+#define regMM_CANE_CE_ERR_STATUS_HI 0x0d90
+#define regMM_CANE_CE_ERR_STATUS_HI_BASE_IDX 0
#endif
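Each register added to this offset header comes as an offset/BASE_IDX pair: the regXXX value is a dword offset within an MMHUB address segment, and regXXX_BASE_IDX selects which segment base to add. A minimal composition sketch follows, assuming a caller-supplied table of segment bases; the table contents and the helper name are hypothetical, and the real driver resolves BASE_IDX through its per-IP reg_offset tables (e.g. the SOC15_REG_OFFSET() macro).

#include <stdint.h>

/*
 * Hypothetical segment-base table, indexed by the *_BASE_IDX value from the
 * offset header.  The real values come from the ASIC's IP offset tables, so
 * a single zeroed entry is used here purely as a placeholder.
 */
static const uint32_t mmhub_segment_base[] = { 0x0 /* placeholder */ };

/* Compose the final MMIO dword offset for one register. */
static inline uint32_t mmhub_reg(uint32_t base_idx, uint32_t reg)
{
	return mmhub_segment_base[base_idx] + reg;
}

/*
 * Usage sketch:
 *   uint32_t off = mmhub_reg(regMM_CANE_ERR_STATUS_BASE_IDX,
 *                            regMM_CANE_ERR_STATUS);
 * which the driver would then hand to its register read/write helpers.
 */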
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h
index af41468ce69f..088c1f02aa43 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h
@@ -10470,6 +10470,30 @@
#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA0_UE_ERR_STATUS_LO
+#define MMEA0_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA0_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA0_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA0_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA0_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA0_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA0_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MMEA0_UE_ERR_STATUS_HI
+#define MMEA0_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA0_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA0_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define MMEA0_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define MMEA0_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d
+#define MMEA0_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA0_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA0_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA0_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define MMEA0_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define MMEA0_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L
//MMEA0_DSM_CNTL
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -10718,12 +10742,35 @@
#define MMEA0_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
#define MMEA0_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
#define MMEA0_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA0_CE_ERR_STATUS_LO
+#define MMEA0_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA0_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA0_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA0_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA0_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA0_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA0_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
//MMEA0_MISC_AON
#define MMEA0_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
#define MMEA0_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
#define MMEA0_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
#define MMEA0_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
-
+//MMEA0_CE_ERR_STATUS_HI
+#define MMEA0_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1
+#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA0_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define MMEA0_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b
+#define MMEA0_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L
+#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA0_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA0_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define MMEA0_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define MMEA0_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L
// addressBlock: aid_mmhub_ea_mmeadec1
//MMEA1_DRAM_RD_CLI2GRP_MAP0
@@ -12418,6 +12465,30 @@
#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA1_UE_ERR_STATUS_LO
+#define MMEA1_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA1_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA1_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA1_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA1_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA1_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA1_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MMEA1_UE_ERR_STATUS_HI
+#define MMEA1_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA1_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA1_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define MMEA1_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define MMEA1_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d
+#define MMEA1_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA1_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA1_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA1_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define MMEA1_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define MMEA1_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L
//MMEA1_DSM_CNTL
#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -12666,12 +12737,35 @@
#define MMEA1_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
#define MMEA1_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
#define MMEA1_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA1_CE_ERR_STATUS_LO
+#define MMEA1_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA1_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA1_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA1_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA1_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA1_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA1_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
//MMEA1_MISC_AON
#define MMEA1_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
#define MMEA1_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
#define MMEA1_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
#define MMEA1_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
-
+//MMEA1_CE_ERR_STATUS_HI
+#define MMEA1_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1
+#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA1_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define MMEA1_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b
+#define MMEA1_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L
+#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA1_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA1_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define MMEA1_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define MMEA1_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L
// addressBlock: aid_mmhub_ea_mmeadec2
//MMEA2_DRAM_RD_CLI2GRP_MAP0
@@ -14366,6 +14460,30 @@
#define MMEA2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMEA2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMEA2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA2_UE_ERR_STATUS_LO
+#define MMEA2_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA2_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA2_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA2_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA2_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA2_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA2_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MMEA2_UE_ERR_STATUS_HI
+#define MMEA2_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA2_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA2_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define MMEA2_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define MMEA2_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d
+#define MMEA2_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA2_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA2_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA2_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define MMEA2_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define MMEA2_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L
//MMEA2_DSM_CNTL
#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -14614,12 +14732,35 @@
#define MMEA2_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
#define MMEA2_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
#define MMEA2_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA2_CE_ERR_STATUS_LO
+#define MMEA2_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA2_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA2_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA2_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA2_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA2_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA2_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
//MMEA2_MISC_AON
#define MMEA2_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
#define MMEA2_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
#define MMEA2_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
#define MMEA2_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
-
+//MMEA2_CE_ERR_STATUS_HI
+#define MMEA2_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1
+#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA2_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define MMEA2_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b
+#define MMEA2_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L
+#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA2_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA2_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define MMEA2_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define MMEA2_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L
// addressBlock: aid_mmhub_ea_mmeadec3
//MMEA3_DRAM_RD_CLI2GRP_MAP0
@@ -16314,6 +16455,30 @@
#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA3_UE_ERR_STATUS_LO
+#define MMEA3_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA3_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA3_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA3_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA3_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA3_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA3_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA3_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MMEA3_UE_ERR_STATUS_HI
+#define MMEA3_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA3_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA3_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define MMEA3_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define MMEA3_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d
+#define MMEA3_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA3_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA3_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA3_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define MMEA3_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define MMEA3_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L
//MMEA3_DSM_CNTL
#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -16562,12 +16727,35 @@
#define MMEA3_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
#define MMEA3_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
#define MMEA3_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA3_CE_ERR_STATUS_LO
+#define MMEA3_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA3_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA3_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA3_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA3_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA3_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA3_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA3_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
//MMEA3_MISC_AON
#define MMEA3_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
#define MMEA3_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
#define MMEA3_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
#define MMEA3_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
-
+//MMEA3_CE_ERR_STATUS_HI
+#define MMEA3_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1
+#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA3_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define MMEA3_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b
+#define MMEA3_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L
+#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA3_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA3_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define MMEA3_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define MMEA3_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L
// addressBlock: aid_mmhub_ea_mmeadec4
//MMEA4_DRAM_RD_CLI2GRP_MAP0
@@ -18262,6 +18450,30 @@
#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//MMEA4_UE_ERR_STATUS_LO
+#define MMEA4_UE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA4_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA4_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA4_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA4_UE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA4_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA4_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA4_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MMEA4_UE_ERR_STATUS_HI
+#define MMEA4_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA4_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA4_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define MMEA4_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define MMEA4_UE_ERR_STATUS_HI__RESERVED_FIELD__SHIFT 0x1d
+#define MMEA4_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA4_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA4_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA4_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define MMEA4_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define MMEA4_UE_ERR_STATUS_HI__RESERVED_FIELD_MASK 0xE0000000L
//MMEA4_DSM_CNTL
#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
@@ -18510,12 +18722,35 @@
#define MMEA4_MISC2__DRAM_WR_THROTTLE_MASK 0x00020000L
#define MMEA4_MISC2__GMI_RD_THROTTLE_MASK 0x00040000L
#define MMEA4_MISC2__GMI_WR_THROTTLE_MASK 0x00080000L
+//MMEA4_CE_ERR_STATUS_LO
+#define MMEA4_CE_ERR_STATUS_LO__STATUS_VALID_FLAG__SHIFT 0x0
+#define MMEA4_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MMEA4_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MMEA4_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MMEA4_CE_ERR_STATUS_LO__STATUS_VALID_FLAG_MASK 0x00000001L
+#define MMEA4_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MMEA4_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MMEA4_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
//MMEA4_MISC_AON
#define MMEA4_MISC_AON__LINKMGR_PARTACK_HYSTERESIS__SHIFT 0x0
#define MMEA4_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE__SHIFT 0x2
#define MMEA4_MISC_AON__LINKMGR_PARTACK_HYSTERESIS_MASK 0x00000003L
#define MMEA4_MISC_AON__LINKMGR_PARTACK_DEASSERT_MODE_MASK 0x00000004L
-
+//MMEA4_CE_ERR_STATUS_HI
+#define MMEA4_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD0__SHIFT 0x1
+#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MMEA4_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define MMEA4_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD1__SHIFT 0x1b
+#define MMEA4_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD0_MASK 0x00000002L
+#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MMEA4_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MMEA4_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define MMEA4_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
+#define MMEA4_CE_ERR_STATUS_HI__RESERVED_FIELD1_MASK 0xF8000000L
// addressBlock: aid_mmhub_pctldec0
//PCTL0_CTRL
@@ -22311,5 +22546,83 @@
#define L2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
#define L2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
-
+// addressBlock: aid_mmhub_mm_cane_mmcanedec
+//MM_CANE_ICG_CTRL
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_IREQ0__SHIFT 0x0
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_ATRET__SHIFT 0x1
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_OREQ__SHIFT 0x2
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x3
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_SDPM_RETURN__SHIFT 0x4
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_IREQ0_MASK 0x00000001L
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_ATRET_MASK 0x00000002L
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_OREQ_MASK 0x00000004L
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000008L
+#define MM_CANE_ICG_CTRL__SOFT_OVERRIDE_SDPM_RETURN_MASK 0x00000010L
+//MM_CANE_ERR_STATUS
+#define MM_CANE_ERR_STATUS__SDPM_RDRSP_STATUS__SHIFT 0x0
+#define MM_CANE_ERR_STATUS__SDPM_WRRSP_STATUS__SHIFT 0x4
+#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS__SHIFT 0x8
+#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define MM_CANE_ERR_STATUS__SDPS_DAT_ERROR__SHIFT 0xb
+#define MM_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR__SHIFT 0xc
+#define MM_CANE_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xd
+#define MM_CANE_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xe
+#define MM_CANE_ERR_STATUS__BUSY_ON_UER_ERROR__SHIFT 0xf
+#define MM_CANE_ERR_STATUS__FUE_FLAG__SHIFT 0x10
+#define MM_CANE_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0x11
+#define MM_CANE_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x12
+#define MM_CANE_ERR_STATUS__SDPM_RDRSP_STATUS_MASK 0x0000000FL
+#define MM_CANE_ERR_STATUS__SDPM_WRRSP_STATUS_MASK 0x000000F0L
+#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATASTATUS_MASK 0x00000300L
+#define MM_CANE_ERR_STATUS__SDPM_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define MM_CANE_ERR_STATUS__SDPS_DAT_ERROR_MASK 0x00000800L
+#define MM_CANE_ERR_STATUS__SDPS_DAT_PARITY_ERROR_MASK 0x00001000L
+#define MM_CANE_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00002000L
+#define MM_CANE_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00004000L
+#define MM_CANE_ERR_STATUS__BUSY_ON_UER_ERROR_MASK 0x00008000L
+#define MM_CANE_ERR_STATUS__FUE_FLAG_MASK 0x00010000L
+#define MM_CANE_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00020000L
+#define MM_CANE_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00040000L
+//MM_CANE_UE_ERR_STATUS_LO
+#define MM_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MM_CANE_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MM_CANE_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MM_CANE_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MM_CANE_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MM_CANE_UE_ERR_STATUS_HI
+#define MM_CANE_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MM_CANE_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MM_CANE_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define MM_CANE_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define MM_CANE_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MM_CANE_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MM_CANE_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MM_CANE_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define MM_CANE_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+//MM_CANE_CE_ERR_STATUS_LO
+#define MM_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define MM_CANE_CE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define MM_CANE_CE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define MM_CANE_CE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define MM_CANE_CE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//MM_CANE_CE_ERR_STATUS_HI
+#define MM_CANE_CE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define MM_CANE_CE_ERR_STATUS_HI__CE_CNT__SHIFT 0x17
+#define MM_CANE_CE_ERR_STATUS_HI__POISON__SHIFT 0x1a
+#define MM_CANE_CE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define MM_CANE_CE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define MM_CANE_CE_ERR_STATUS_HI__CE_CNT_MASK 0x03800000L
+#define MM_CANE_CE_ERR_STATUS_HI__POISON_MASK 0x04000000L
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h
index 033f2796c1e3..c8a15c8f4822 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h
@@ -6201,6 +6201,8 @@
#define regNBIF_SHUB_TODET_SYNCFLOOD_CTRL2_BASE_IDX 8
#define regBIFC_BME_ERR_LOG_HB 0xe8ab
#define regBIFC_BME_ERR_LOG_HB_BASE_IDX 8
+#define regBIFC_GFX_INT_MONITOR_MASK 0xe8ad
+#define regBIFC_GFX_INT_MONITOR_MASK_BASE_IDX 8
#define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC 0xe8c0
#define regBIFC_HRP_SDP_WRRSP_POOLCRED_ALLOC_BASE_IDX 8
#define regBIFC_HRP_SDP_RDRSP_POOLCRED_ALLOC 0xe8c1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
index a22481e7bcdb..e0c28c29ddb0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h
@@ -38896,5 +38896,13 @@
#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L
#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
+//PCIE_PERF_CNTL_TXCLK3
+#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL
+
+//PCIE_PERF_CNTL_TXCLK7
+#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL__SHIFT 0x0
+#define PCIE_PERF_CNTL_TXCLK7__EVENT0_SEL_MASK 0x000000FFL
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h
new file mode 100644
index 000000000000..a5e7ba5d99ca
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_6_1_0_OFFSET_HEADER
+#define _osssys_6_1_0_OFFSET_HEADER
+
+
+
+// addressBlock: osssys_osssysdec
+// base address: 0x4280
+#define regIH_VMID_0_LUT 0x0000
+#define regIH_VMID_0_LUT_BASE_IDX 0
+#define regIH_VMID_1_LUT 0x0001
+#define regIH_VMID_1_LUT_BASE_IDX 0
+#define regIH_VMID_2_LUT 0x0002
+#define regIH_VMID_2_LUT_BASE_IDX 0
+#define regIH_VMID_3_LUT 0x0003
+#define regIH_VMID_3_LUT_BASE_IDX 0
+#define regIH_VMID_4_LUT 0x0004
+#define regIH_VMID_4_LUT_BASE_IDX 0
+#define regIH_VMID_5_LUT 0x0005
+#define regIH_VMID_5_LUT_BASE_IDX 0
+#define regIH_VMID_6_LUT 0x0006
+#define regIH_VMID_6_LUT_BASE_IDX 0
+#define regIH_VMID_7_LUT 0x0007
+#define regIH_VMID_7_LUT_BASE_IDX 0
+#define regIH_VMID_8_LUT 0x0008
+#define regIH_VMID_8_LUT_BASE_IDX 0
+#define regIH_VMID_9_LUT 0x0009
+#define regIH_VMID_9_LUT_BASE_IDX 0
+#define regIH_VMID_10_LUT 0x000a
+#define regIH_VMID_10_LUT_BASE_IDX 0
+#define regIH_VMID_11_LUT 0x000b
+#define regIH_VMID_11_LUT_BASE_IDX 0
+#define regIH_VMID_12_LUT 0x000c
+#define regIH_VMID_12_LUT_BASE_IDX 0
+#define regIH_VMID_13_LUT 0x000d
+#define regIH_VMID_13_LUT_BASE_IDX 0
+#define regIH_VMID_14_LUT 0x000e
+#define regIH_VMID_14_LUT_BASE_IDX 0
+#define regIH_VMID_15_LUT 0x000f
+#define regIH_VMID_15_LUT_BASE_IDX 0
+#define regIH_VMID_0_LUT_MM 0x0010
+#define regIH_VMID_0_LUT_MM_BASE_IDX 0
+#define regIH_VMID_1_LUT_MM 0x0011
+#define regIH_VMID_1_LUT_MM_BASE_IDX 0
+#define regIH_VMID_2_LUT_MM 0x0012
+#define regIH_VMID_2_LUT_MM_BASE_IDX 0
+#define regIH_VMID_3_LUT_MM 0x0013
+#define regIH_VMID_3_LUT_MM_BASE_IDX 0
+#define regIH_VMID_4_LUT_MM 0x0014
+#define regIH_VMID_4_LUT_MM_BASE_IDX 0
+#define regIH_VMID_5_LUT_MM 0x0015
+#define regIH_VMID_5_LUT_MM_BASE_IDX 0
+#define regIH_VMID_6_LUT_MM 0x0016
+#define regIH_VMID_6_LUT_MM_BASE_IDX 0
+#define regIH_VMID_7_LUT_MM 0x0017
+#define regIH_VMID_7_LUT_MM_BASE_IDX 0
+#define regIH_VMID_8_LUT_MM 0x0018
+#define regIH_VMID_8_LUT_MM_BASE_IDX 0
+#define regIH_VMID_9_LUT_MM 0x0019
+#define regIH_VMID_9_LUT_MM_BASE_IDX 0
+#define regIH_VMID_10_LUT_MM 0x001a
+#define regIH_VMID_10_LUT_MM_BASE_IDX 0
+#define regIH_VMID_11_LUT_MM 0x001b
+#define regIH_VMID_11_LUT_MM_BASE_IDX 0
+#define regIH_VMID_12_LUT_MM 0x001c
+#define regIH_VMID_12_LUT_MM_BASE_IDX 0
+#define regIH_VMID_13_LUT_MM 0x001d
+#define regIH_VMID_13_LUT_MM_BASE_IDX 0
+#define regIH_VMID_14_LUT_MM 0x001e
+#define regIH_VMID_14_LUT_MM_BASE_IDX 0
+#define regIH_VMID_15_LUT_MM 0x001f
+#define regIH_VMID_15_LUT_MM_BASE_IDX 0
+#define regIH_COOKIE_0 0x0020
+#define regIH_COOKIE_0_BASE_IDX 0
+#define regIH_COOKIE_1 0x0021
+#define regIH_COOKIE_1_BASE_IDX 0
+#define regIH_COOKIE_2 0x0022
+#define regIH_COOKIE_2_BASE_IDX 0
+#define regIH_COOKIE_3 0x0023
+#define regIH_COOKIE_3_BASE_IDX 0
+#define regIH_COOKIE_4 0x0024
+#define regIH_COOKIE_4_BASE_IDX 0
+#define regIH_COOKIE_5 0x0025
+#define regIH_COOKIE_5_BASE_IDX 0
+#define regIH_COOKIE_6 0x0026
+#define regIH_COOKIE_6_BASE_IDX 0
+#define regIH_COOKIE_7 0x0027
+#define regIH_COOKIE_7_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART0 0x003f
+#define regIH_REGISTER_LAST_PART0_BASE_IDX 0
+#define regIH_RB_CNTL 0x0080
+#define regIH_RB_CNTL_BASE_IDX 0
+#define regIH_RB_RPTR 0x0081
+#define regIH_RB_RPTR_BASE_IDX 0
+#define regIH_RB_WPTR 0x0082
+#define regIH_RB_WPTR_BASE_IDX 0
+#define regIH_RB_BASE 0x0083
+#define regIH_RB_BASE_BASE_IDX 0
+#define regIH_RB_BASE_HI 0x0084
+#define regIH_RB_BASE_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_HI 0x0085
+#define regIH_RB_WPTR_ADDR_HI_BASE_IDX 0
+#define regIH_RB_WPTR_ADDR_LO 0x0086
+#define regIH_RB_WPTR_ADDR_LO_BASE_IDX 0
+#define regIH_DOORBELL_RPTR 0x0087
+#define regIH_DOORBELL_RPTR_BASE_IDX 0
+#define regIH_DOORBELL_RETRY_CAM 0x0088
+#define regIH_DOORBELL_RETRY_CAM_BASE_IDX 0
+#define regIH_RB_CNTL_RING1 0x008c
+#define regIH_RB_CNTL_RING1_BASE_IDX 0
+#define regIH_RB_RPTR_RING1 0x008d
+#define regIH_RB_RPTR_RING1_BASE_IDX 0
+#define regIH_RB_WPTR_RING1 0x008e
+#define regIH_RB_WPTR_RING1_BASE_IDX 0
+#define regIH_RB_BASE_RING1 0x008f
+#define regIH_RB_BASE_RING1_BASE_IDX 0
+#define regIH_RB_BASE_HI_RING1 0x0090
+#define regIH_RB_BASE_HI_RING1_BASE_IDX 0
+#define regIH_DOORBELL_RPTR_RING1 0x0093
+#define regIH_DOORBELL_RPTR_RING1_BASE_IDX 0
+#define regIH_RETRY_CAM_ACK 0x00a4
+#define regIH_RETRY_CAM_ACK_BASE_IDX 0
+#define regIH_VERSION 0x00a5
+#define regIH_VERSION_BASE_IDX 0
+#define regIH_CNTL 0x00a8
+#define regIH_CNTL_BASE_IDX 0
+#define regIH_CLK_CTRL 0x00a9
+#define regIH_CLK_CTRL_BASE_IDX 0
+#define regIH_STORM_CLIENT_LIST_CNTL 0x00aa
+#define regIH_STORM_CLIENT_LIST_CNTL_BASE_IDX 0
+#define regIH_LIMIT_INT_RATE_CNTL 0x00ab
+#define regIH_LIMIT_INT_RATE_CNTL_BASE_IDX 0
+#define regIH_RETRY_INT_CAM_CNTL 0x00ac
+#define regIH_RETRY_INT_CAM_CNTL_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL 0x00ad
+#define regIH_MEM_POWER_CTRL_BASE_IDX 0
+#define regIH_MEM_POWER_CTRL2 0x00ae
+#define regIH_MEM_POWER_CTRL2_BASE_IDX 0
+#define regIH_CNTL2 0x00c1
+#define regIH_CNTL2_BASE_IDX 0
+#define regIH_STATUS 0x00c2
+#define regIH_STATUS_BASE_IDX 0
+#define regIH_PERFMON_CNTL 0x00c3
+#define regIH_PERFMON_CNTL_BASE_IDX 0
+#define regIH_PERFCOUNTER0_RESULT 0x00c4
+#define regIH_PERFCOUNTER0_RESULT_BASE_IDX 0
+#define regIH_PERFCOUNTER1_RESULT 0x00c5
+#define regIH_PERFCOUNTER1_RESULT_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_31_0 0x00c7
+#define regIH_DSM_MATCH_VALUE_BIT_31_0_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_63_32 0x00c8
+#define regIH_DSM_MATCH_VALUE_BIT_63_32_BASE_IDX 0
+#define regIH_DSM_MATCH_VALUE_BIT_95_64 0x00c9
+#define regIH_DSM_MATCH_VALUE_BIT_95_64_BASE_IDX 0
+#define regIH_DSM_MATCH_FIELD_CONTROL 0x00ca
+#define regIH_DSM_MATCH_FIELD_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_DATA_CONTROL 0x00cb
+#define regIH_DSM_MATCH_DATA_CONTROL_BASE_IDX 0
+#define regIH_DSM_MATCH_FCN_ID 0x00cc
+#define regIH_DSM_MATCH_FCN_ID_BASE_IDX 0
+#define regIH_VF_RB_STATUS 0x00ce
+#define regIH_VF_RB_STATUS_BASE_IDX 0
+#define regIH_VF_RB_STATUS2 0x00cf
+#define regIH_VF_RB_STATUS2_BASE_IDX 0
+#define regIH_VF_RB1_STATUS 0x00d0
+#define regIH_VF_RB1_STATUS_BASE_IDX 0
+#define regIH_VF_RB1_STATUS2 0x00d1
+#define regIH_VF_RB1_STATUS2_BASE_IDX 0
+#define regIH_RB_STATUS 0x00d4
+#define regIH_RB_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_CNTL 0x00d5
+#define regIH_INT_FLOOD_CNTL_BASE_IDX 0
+#define regIH_RB0_INT_FLOOD_STATUS 0x00d6
+#define regIH_RB0_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_RB1_INT_FLOOD_STATUS 0x00d7
+#define regIH_RB1_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLOOD_STATUS 0x00d9
+#define regIH_INT_FLOOD_STATUS_BASE_IDX 0
+#define regIH_INT_FLAGS 0x00dc
+#define regIH_INT_FLAGS_BASE_IDX 0
+#define regIH_LAST_INT_INFO0 0x00dd
+#define regIH_LAST_INT_INFO0_BASE_IDX 0
+#define regIH_LAST_INT_INFO1 0x00de
+#define regIH_LAST_INT_INFO1_BASE_IDX 0
+#define regIH_LAST_INT_INFO2 0x00df
+#define regIH_LAST_INT_INFO2_BASE_IDX 0
+#define regIH_SCRATCH 0x00e0
+#define regIH_SCRATCH_BASE_IDX 0
+#define regIH_CLIENT_CREDIT_ERROR 0x00e1
+#define regIH_CLIENT_CREDIT_ERROR_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG 0x00e2
+#define regIH_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regIH_GPU_IOV_VIOLATION_LOG2 0x00e3
+#define regIH_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regIH_COOKIE_REC_VIOLATION_LOG 0x00e4
+#define regIH_COOKIE_REC_VIOLATION_LOG_BASE_IDX 0
+#define regIH_CREDIT_STATUS 0x00e5
+#define regIH_CREDIT_STATUS_BASE_IDX 0
+#define regIH_MMHUB_ERROR 0x00e6
+#define regIH_MMHUB_ERROR_BASE_IDX 0
+#define regIH_VF_RB_STATUS3 0x00ea
+#define regIH_VF_RB_STATUS3_BASE_IDX 0
+#define regIH_VF_RB_STATUS4 0x00eb
+#define regIH_VF_RB_STATUS4_BASE_IDX 0
+#define regIH_VF_RB1_STATUS3 0x00ec
+#define regIH_VF_RB1_STATUS3_BASE_IDX 0
+#define regIH_MSI_STORM_CTRL 0x00f1
+#define regIH_MSI_STORM_CTRL_BASE_IDX 0
+#define regIH_MSI_STORM_CLIENT_INDEX 0x00f2
+#define regIH_MSI_STORM_CLIENT_INDEX_BASE_IDX 0
+#define regIH_MSI_STORM_CLIENT_DATA 0x00f3
+#define regIH_MSI_STORM_CLIENT_DATA_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART2 0x00ff
+#define regIH_REGISTER_LAST_PART2_BASE_IDX 0
+#define regSEM_MAILBOX 0x010a
+#define regSEM_MAILBOX_BASE_IDX 0
+#define regSEM_MAILBOX_CLEAR 0x010b
+#define regSEM_MAILBOX_CLEAR_BASE_IDX 0
+#define regSEM_REGISTER_LAST_PART2 0x017f
+#define regSEM_REGISTER_LAST_PART2_BASE_IDX 0
+#define regIH_ACTIVE_FCN_ID 0x0180
+#define regIH_ACTIVE_FCN_ID_BASE_IDX 0
+#define regIH_VIRT_RESET_REQ 0x0181
+#define regIH_VIRT_RESET_REQ_BASE_IDX 0
+#define regIH_CLIENT_CFG 0x0184
+#define regIH_CLIENT_CFG_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_INDEX 0x0185
+#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA 0x0186
+#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_INDEX 0x0188
+#define regIH_CLIENT_CFG_INDEX_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA 0x0189
+#define regIH_CLIENT_CFG_DATA_BASE_IDX 0
+#define regIH_CLIENT_CFG_DATA2 0x018a
+#define regIH_CLIENT_CFG_DATA2_BASE_IDX 0
+#define regIH_CID_REMAP_INDEX 0x018b
+#define regIH_CID_REMAP_INDEX_BASE_IDX 0
+#define regIH_CID_REMAP_DATA 0x018c
+#define regIH_CID_REMAP_DATA_BASE_IDX 0
+#define regIH_CHICKEN 0x018d
+#define regIH_CHICKEN_BASE_IDX 0
+#define regIH_MMHUB_CNTL 0x018e
+#define regIH_MMHUB_CNTL_BASE_IDX 0
+#define regIH_INT_DROP_CNTL 0x018f
+#define regIH_INT_DROP_CNTL_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE0 0x0190
+#define regIH_INT_DROP_MATCH_VALUE0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_VALUE1 0x0191
+#define regIH_INT_DROP_MATCH_VALUE1_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK0 0x0192
+#define regIH_INT_DROP_MATCH_MASK0_BASE_IDX 0
+#define regIH_INT_DROP_MATCH_MASK1 0x0193
+#define regIH_INT_DROP_MATCH_MASK1_BASE_IDX 0
+#define regIH_REGISTER_LAST_PART1 0x019f
+#define regIH_REGISTER_LAST_PART1_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h
new file mode 100644
index 000000000000..15d5689dde65
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h
@@ -0,0 +1,1019 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _osssys_6_1_0_SH_MASK_HEADER
+#define _osssys_6_1_0_SH_MASK_HEADER
+
+
+// addressBlock: osssys_osssysdec
+//IH_VMID_0_LUT
+#define IH_VMID_0_LUT__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT
+#define IH_VMID_1_LUT__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT
+#define IH_VMID_2_LUT__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT
+#define IH_VMID_3_LUT__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT
+#define IH_VMID_4_LUT__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT
+#define IH_VMID_5_LUT__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT
+#define IH_VMID_6_LUT__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT
+#define IH_VMID_7_LUT__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT
+#define IH_VMID_8_LUT__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT
+#define IH_VMID_9_LUT__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT
+#define IH_VMID_10_LUT__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT
+#define IH_VMID_11_LUT__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT
+#define IH_VMID_12_LUT__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT
+#define IH_VMID_13_LUT__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT
+#define IH_VMID_14_LUT__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT
+#define IH_VMID_15_LUT__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT__PASID_MASK 0x0000FFFFL
+//IH_VMID_0_LUT_MM
+#define IH_VMID_0_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_0_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_1_LUT_MM
+#define IH_VMID_1_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_1_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_2_LUT_MM
+#define IH_VMID_2_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_2_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_3_LUT_MM
+#define IH_VMID_3_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_3_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_4_LUT_MM
+#define IH_VMID_4_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_4_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_5_LUT_MM
+#define IH_VMID_5_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_5_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_6_LUT_MM
+#define IH_VMID_6_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_6_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_7_LUT_MM
+#define IH_VMID_7_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_7_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_8_LUT_MM
+#define IH_VMID_8_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_8_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_9_LUT_MM
+#define IH_VMID_9_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_9_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_10_LUT_MM
+#define IH_VMID_10_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_10_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_11_LUT_MM
+#define IH_VMID_11_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_11_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_12_LUT_MM
+#define IH_VMID_12_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_12_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_13_LUT_MM
+#define IH_VMID_13_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_13_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_14_LUT_MM
+#define IH_VMID_14_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_14_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_VMID_15_LUT_MM
+#define IH_VMID_15_LUT_MM__PASID__SHIFT 0x0
+#define IH_VMID_15_LUT_MM__PASID_MASK 0x0000FFFFL
+//IH_COOKIE_0
+#define IH_COOKIE_0__CLIENT_ID__SHIFT 0x0
+#define IH_COOKIE_0__SOURCE_ID__SHIFT 0x8
+#define IH_COOKIE_0__RING_ID__SHIFT 0x10
+#define IH_COOKIE_0__VM_ID__SHIFT 0x18
+#define IH_COOKIE_0__RESERVED__SHIFT 0x1c
+#define IH_COOKIE_0__VMID_TYPE__SHIFT 0x1f
+#define IH_COOKIE_0__CLIENT_ID_MASK 0x000000FFL
+#define IH_COOKIE_0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_COOKIE_0__RING_ID_MASK 0x00FF0000L
+#define IH_COOKIE_0__VM_ID_MASK 0x0F000000L
+#define IH_COOKIE_0__RESERVED_MASK 0x70000000L
+#define IH_COOKIE_0__VMID_TYPE_MASK 0x80000000L
+//IH_COOKIE_1
+#define IH_COOKIE_1__TIMESTAMP_31_0__SHIFT 0x0
+#define IH_COOKIE_1__TIMESTAMP_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_2
+#define IH_COOKIE_2__TIMESTAMP_47_32__SHIFT 0x0
+#define IH_COOKIE_2__RESERVED__SHIFT 0x10
+#define IH_COOKIE_2__TIMESTAMP_SRC__SHIFT 0x1f
+#define IH_COOKIE_2__TIMESTAMP_47_32_MASK 0x0000FFFFL
+#define IH_COOKIE_2__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_2__TIMESTAMP_SRC_MASK 0x80000000L
+//IH_COOKIE_3
+#define IH_COOKIE_3__PAS_ID__SHIFT 0x0
+#define IH_COOKIE_3__RESERVED__SHIFT 0x10
+#define IH_COOKIE_3__PASID_SRC__SHIFT 0x1f
+#define IH_COOKIE_3__PAS_ID_MASK 0x0000FFFFL
+#define IH_COOKIE_3__RESERVED_MASK 0x7FFF0000L
+#define IH_COOKIE_3__PASID_SRC_MASK 0x80000000L
+//IH_COOKIE_4
+#define IH_COOKIE_4__CONTEXT_ID_31_0__SHIFT 0x0
+#define IH_COOKIE_4__CONTEXT_ID_31_0_MASK 0xFFFFFFFFL
+//IH_COOKIE_5
+#define IH_COOKIE_5__CONTEXT_ID_63_32__SHIFT 0x0
+#define IH_COOKIE_5__CONTEXT_ID_63_32_MASK 0xFFFFFFFFL
+//IH_COOKIE_6
+#define IH_COOKIE_6__CONTEXT_ID_95_64__SHIFT 0x0
+#define IH_COOKIE_6__CONTEXT_ID_95_64_MASK 0xFFFFFFFFL
+//IH_COOKIE_7
+#define IH_COOKIE_7__CONTEXT_ID_128_96__SHIFT 0x0
+#define IH_COOKIE_7__CONTEXT_ID_128_96_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART0
+#define IH_REGISTER_LAST_PART0__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART0__RESERVED_MASK 0xFFFFFFFFL
+//IH_RB_CNTL
+#define IH_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL__ENABLE_INTR__SHIFT 0x11
+#define IH_RB_CNTL__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL__RPTR_REARM__SHIFT 0x15
+#define IH_RB_CNTL__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L
+#define IH_RB_CNTL__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL__ENABLE_INTR_MASK 0x00020000L
+#define IH_RB_CNTL__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL__RPTR_REARM_MASK 0x00200000L
+#define IH_RB_CNTL__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_RPTR
+#define IH_RB_RPTR__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR
+#define IH_RB_WPTR__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_BASE
+#define IH_RB_BASE__ADDR__SHIFT 0x0
+#define IH_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI
+#define IH_RB_BASE_HI__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI__ADDR_MASK 0x000000FFL
+//IH_RB_WPTR_ADDR_HI
+#define IH_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define IH_RB_WPTR_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//IH_RB_WPTR_ADDR_LO
+#define IH_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define IH_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//IH_DOORBELL_RPTR
+#define IH_DOORBELL_RPTR__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR__ENABLE_MASK 0x10000000L
+//IH_DOORBELL_RETRY_CAM
+#define IH_DOORBELL_RETRY_CAM__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RETRY_CAM__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RETRY_CAM__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RETRY_CAM__ENABLE_MASK 0x10000000L
+//IH_RB_CNTL_RING1
+#define IH_RB_CNTL_RING1__RB_ENABLE__SHIFT 0x0
+#define IH_RB_CNTL_RING1__RB_SIZE__SHIFT 0x1
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE__SHIFT 0x9
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR__SHIFT 0xa
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR__SHIFT 0xb
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD__SHIFT 0xc
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE__SHIFT 0x10
+#define IH_RB_CNTL_RING1__MC_SWAP__SHIFT 0x12
+#define IH_RB_CNTL_RING1__MC_SNOOP__SHIFT 0x14
+#define IH_RB_CNTL_RING1__MC_RO__SHIFT 0x16
+#define IH_RB_CNTL_RING1__MC_VMID__SHIFT 0x18
+#define IH_RB_CNTL_RING1__MC_SPACE__SHIFT 0x1c
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f
+#define IH_RB_CNTL_RING1__RB_ENABLE_MASK 0x00000001L
+#define IH_RB_CNTL_RING1__RB_SIZE_MASK 0x0000003EL
+#define IH_RB_CNTL_RING1__RB_FULL_DRAIN_ENABLE_MASK 0x00000200L
+#define IH_RB_CNTL_RING1__FULL_DRAIN_CLEAR_MASK 0x00000400L
+#define IH_RB_CNTL_RING1__PAGE_RB_CLEAR_MASK 0x00000800L
+#define IH_RB_CNTL_RING1__RB_USED_INT_THRESHOLD_MASK 0x0000F000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_ENABLE_MASK 0x00010000L
+#define IH_RB_CNTL_RING1__MC_SWAP_MASK 0x000C0000L
+#define IH_RB_CNTL_RING1__MC_SNOOP_MASK 0x00100000L
+#define IH_RB_CNTL_RING1__MC_RO_MASK 0x00400000L
+#define IH_RB_CNTL_RING1__MC_VMID_MASK 0x0F000000L
+#define IH_RB_CNTL_RING1__MC_SPACE_MASK 0x70000000L
+#define IH_RB_CNTL_RING1__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L
+//IH_RB_RPTR_RING1
+#define IH_RB_RPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_RPTR_RING1__OFFSET_MASK 0x0003FFFCL
+//IH_RB_WPTR_RING1
+#define IH_RB_WPTR_RING1__RB_OVERFLOW__SHIFT 0x0
+#define IH_RB_WPTR_RING1__OFFSET__SHIFT 0x2
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE__SHIFT 0x12
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW__SHIFT 0x13
+#define IH_RB_WPTR_RING1__RB_OVERFLOW_MASK 0x00000001L
+#define IH_RB_WPTR_RING1__OFFSET_MASK 0x0003FFFCL
+#define IH_RB_WPTR_RING1__RB_LEFT_NONE_MASK 0x00040000L
+#define IH_RB_WPTR_RING1__RB_MAY_OVERFLOW_MASK 0x00080000L
+//IH_RB_BASE_RING1
+#define IH_RB_BASE_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_RING1__ADDR_MASK 0xFFFFFFFFL
+//IH_RB_BASE_HI_RING1
+#define IH_RB_BASE_HI_RING1__ADDR__SHIFT 0x0
+#define IH_RB_BASE_HI_RING1__ADDR_MASK 0x000000FFL
+//IH_DOORBELL_RPTR_RING1
+#define IH_DOORBELL_RPTR_RING1__OFFSET__SHIFT 0x0
+#define IH_DOORBELL_RPTR_RING1__ENABLE__SHIFT 0x1c
+#define IH_DOORBELL_RPTR_RING1__OFFSET_MASK 0x03FFFFFFL
+#define IH_DOORBELL_RPTR_RING1__ENABLE_MASK 0x10000000L
+//IH_RETRY_CAM_ACK
+#define IH_RETRY_CAM_ACK__INDEX__SHIFT 0x0
+#define IH_RETRY_CAM_ACK__INDEX_MASK 0x000003FFL
+//IH_VERSION
+#define IH_VERSION__MINVER__SHIFT 0x0
+#define IH_VERSION__MAJVER__SHIFT 0x8
+#define IH_VERSION__REV__SHIFT 0x10
+#define IH_VERSION__MINVER_MASK 0x0000007FL
+#define IH_VERSION__MAJVER_MASK 0x00007F00L
+#define IH_VERSION__REV_MASK 0x003F0000L
+//IH_CNTL
+#define IH_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x0
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL__SHIFT 0x6
+#define IH_CNTL__IH_FIFO_HIGHWATER__SHIFT 0x8
+#define IH_CNTL__MC_WR_CLEAN_CNT__SHIFT 0x14
+#define IH_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x0000001FL
+#define IH_CNTL__IH_IDLE_HYSTERESIS_CNTL_MASK 0x000000C0L
+#define IH_CNTL__IH_FIFO_HIGHWATER_MASK 0x00007F00L
+#define IH_CNTL__MC_WR_CLEAN_CNT_MASK 0x01F00000L
+//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x17
+#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE__SHIFT 0x18
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_PASID_LUT_MEM_CLK_SOFT_OVERRIDE_MASK 0x00800000L
+#define IH_CLK_CTRL__MSI_STORM_COUNTER_CLK_SOFT_OVERRIDE_MASK 0x01000000L
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
+#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
+#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
+#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
+#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE_MASK 0x40000000L
+#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE_MASK 0x80000000L
+//IH_STORM_CLIENT_LIST_CNTL
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT__SHIFT 0x1
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT__SHIFT 0x2
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT__SHIFT 0x3
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT__SHIFT 0x4
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT__SHIFT 0x5
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT__SHIFT 0x6
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT__SHIFT 0x7
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT__SHIFT 0x8
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT__SHIFT 0x9
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT__SHIFT 0xa
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT__SHIFT 0xb
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT__SHIFT 0xc
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT__SHIFT 0xd
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT__SHIFT 0xe
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT__SHIFT 0xf
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT__SHIFT 0x10
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT__SHIFT 0x11
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT__SHIFT 0x12
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT__SHIFT 0x13
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT__SHIFT 0x14
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT__SHIFT 0x15
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT__SHIFT 0x16
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT__SHIFT 0x17
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT__SHIFT 0x18
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT__SHIFT 0x19
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT__SHIFT 0x1a
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT__SHIFT 0x1b
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT__SHIFT 0x1c
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT__SHIFT 0x1d
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT__SHIFT 0x1e
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT__SHIFT 0x1f
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT1_IS_STORM_CLIENT_MASK 0x00000002L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT2_IS_STORM_CLIENT_MASK 0x00000004L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT3_IS_STORM_CLIENT_MASK 0x00000008L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT4_IS_STORM_CLIENT_MASK 0x00000010L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT5_IS_STORM_CLIENT_MASK 0x00000020L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT6_IS_STORM_CLIENT_MASK 0x00000040L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT7_IS_STORM_CLIENT_MASK 0x00000080L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT8_IS_STORM_CLIENT_MASK 0x00000100L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT9_IS_STORM_CLIENT_MASK 0x00000200L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT10_IS_STORM_CLIENT_MASK 0x00000400L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT11_IS_STORM_CLIENT_MASK 0x00000800L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT12_IS_STORM_CLIENT_MASK 0x00001000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT13_IS_STORM_CLIENT_MASK 0x00002000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT14_IS_STORM_CLIENT_MASK 0x00004000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT15_IS_STORM_CLIENT_MASK 0x00008000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT16_IS_STORM_CLIENT_MASK 0x00010000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT17_IS_STORM_CLIENT_MASK 0x00020000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT18_IS_STORM_CLIENT_MASK 0x00040000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT19_IS_STORM_CLIENT_MASK 0x00080000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT20_IS_STORM_CLIENT_MASK 0x00100000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT21_IS_STORM_CLIENT_MASK 0x00200000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT22_IS_STORM_CLIENT_MASK 0x00400000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT23_IS_STORM_CLIENT_MASK 0x00800000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT24_IS_STORM_CLIENT_MASK 0x01000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT25_IS_STORM_CLIENT_MASK 0x02000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT26_IS_STORM_CLIENT_MASK 0x04000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT27_IS_STORM_CLIENT_MASK 0x08000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT28_IS_STORM_CLIENT_MASK 0x10000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT29_IS_STORM_CLIENT_MASK 0x20000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
+#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
+//IH_LIMIT_INT_RATE_CNTL
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE__SHIFT 0x0
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL__SHIFT 0x1
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD__SHIFT 0x5
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY__SHIFT 0x11
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT__SHIFT 0x15
+#define IH_LIMIT_INT_RATE_CNTL__LIMIT_ENABLE_MASK 0x00000001L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_INTERVAL_MASK 0x0000001EL
+#define IH_LIMIT_INT_RATE_CNTL__PERF_THRESHOLD_MASK 0x0000FFE0L
+#define IH_LIMIT_INT_RATE_CNTL__RETURN_DELAY_MASK 0x001E0000L
+#define IH_LIMIT_INT_RATE_CNTL__PERF_RESULT_MASK 0xFFE00000L
+//IH_RETRY_INT_CAM_CNTL
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE__SHIFT 0x0
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE__SHIFT 0x8
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE__SHIFT 0x14
+#define IH_RETRY_INT_CAM_CNTL__CAM_SIZE_MASK 0x0000001FL
+#define IH_RETRY_INT_CAM_CNTL__BACK_PRESSURE_SKID_VALUE_MASK 0x00003F00L
+#define IH_RETRY_INT_CAM_CNTL__PER_VF_ENTRY_SIZE_MASK 0x00300000L
+//IH_MEM_POWER_CTRL
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN__SHIFT 0x10
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN__SHIFT 0x11
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN__SHIFT 0x12
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN__SHIFT 0x13
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS__SHIFT 0x14
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x18
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0x1e
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL__IH_BUFFER_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_CTRL_EN_MASK 0x00010000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_LS_EN_MASK 0x00020000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DS_EN_MASK 0x00040000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_SD_EN_MASK 0x00080000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_IDLE_HYSTERESIS_MASK 0x00700000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_UP_RECOVER_DELAY_MASK 0x3F000000L
+#define IH_MEM_POWER_CTRL__IH_RETRY_INT_CAM_MEM_POWER_DOWN_ENTER_DELAY_MASK 0xC0000000L
+//IH_MEM_POWER_CTRL2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN__SHIFT 0x0
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN__SHIFT 0x1
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN__SHIFT 0x2
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN__SHIFT 0x3
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS__SHIFT 0x4
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY__SHIFT 0x8
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY__SHIFT 0xe
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_LS_EN_MASK 0x00000002L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DS_EN_MASK 0x00000004L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_SD_EN_MASK 0x00000008L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_IDLE_HYSTERESIS_MASK 0x00000070L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_UP_RECOVER_DELAY_MASK 0x00003F00L
+#define IH_MEM_POWER_CTRL2__IH_PASID_LUT_MEM_POWER_DOWN_ENTER_DELAY_MASK 0x0000C000L
+//IH_CNTL2
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT__SHIFT 0x0
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE__SHIFT 0x8
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT_MASK 0x0000001FL
+#define IH_CNTL2__SELF_IV_FORCE_WPTR_UPDATE_ENABLE_MASK 0x00000100L
+//IH_STATUS
+#define IH_STATUS__IDLE__SHIFT 0x0
+#define IH_STATUS__INPUT_IDLE__SHIFT 0x1
+#define IH_STATUS__BUFFER_IDLE__SHIFT 0x2
+#define IH_STATUS__RB_FULL__SHIFT 0x3
+#define IH_STATUS__RB_FULL_DRAIN__SHIFT 0x4
+#define IH_STATUS__RB_OVERFLOW__SHIFT 0x5
+#define IH_STATUS__MC_WR_IDLE__SHIFT 0x6
+#define IH_STATUS__MC_WR_STALL__SHIFT 0x7
+#define IH_STATUS__MC_WR_CLEAN_PENDING__SHIFT 0x8
+#define IH_STATUS__MC_WR_CLEAN_STALL__SHIFT 0x9
+#define IH_STATUS__BIF_INTERRUPT_LINE__SHIFT 0xa
+#define IH_STATUS__SWITCH_READY__SHIFT 0xb
+#define IH_STATUS__RB1_FULL__SHIFT 0xc
+#define IH_STATUS__RB1_FULL_DRAIN__SHIFT 0xd
+#define IH_STATUS__RB1_OVERFLOW__SHIFT 0xe
+#define IH_STATUS__SELF_INT_GEN_IDLE__SHIFT 0x12
+#define IH_STATUS__RETRY_INT_CAM_IDLE__SHIFT 0x13
+#define IH_STATUS__ZSTATES_FENCE__SHIFT 0x14
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED__SHIFT 0x15
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED__SHIFT 0x16
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED__SHIFT 0x17
+#define IH_STATUS__IDLE_MASK 0x00000001L
+#define IH_STATUS__INPUT_IDLE_MASK 0x00000002L
+#define IH_STATUS__BUFFER_IDLE_MASK 0x00000004L
+#define IH_STATUS__RB_FULL_MASK 0x00000008L
+#define IH_STATUS__RB_FULL_DRAIN_MASK 0x00000010L
+#define IH_STATUS__RB_OVERFLOW_MASK 0x00000020L
+#define IH_STATUS__MC_WR_IDLE_MASK 0x00000040L
+#define IH_STATUS__MC_WR_STALL_MASK 0x00000080L
+#define IH_STATUS__MC_WR_CLEAN_PENDING_MASK 0x00000100L
+#define IH_STATUS__MC_WR_CLEAN_STALL_MASK 0x00000200L
+#define IH_STATUS__BIF_INTERRUPT_LINE_MASK 0x00000400L
+#define IH_STATUS__SWITCH_READY_MASK 0x00000800L
+#define IH_STATUS__RB1_FULL_MASK 0x00001000L
+#define IH_STATUS__RB1_FULL_DRAIN_MASK 0x00002000L
+#define IH_STATUS__RB1_OVERFLOW_MASK 0x00004000L
+#define IH_STATUS__SELF_INT_GEN_IDLE_MASK 0x00040000L
+#define IH_STATUS__RETRY_INT_CAM_IDLE_MASK 0x00080000L
+#define IH_STATUS__ZSTATES_FENCE_MASK 0x00100000L
+#define IH_STATUS__IH_BUFFER_MEM_POWER_GATED_MASK 0x00200000L
+#define IH_STATUS__IH_RETRY_INT_CAM_MEM_POWER_GATED_MASK 0x00400000L
+#define IH_STATUS__IH_PASID_LUT_MEM_POWER_GATED_MASK 0x00800000L
+//IH_PERFMON_CNTL
+#define IH_PERFMON_CNTL__ENABLE0__SHIFT 0x0
+#define IH_PERFMON_CNTL__CLEAR0__SHIFT 0x1
+#define IH_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2
+#define IH_PERFMON_CNTL__ENABLE1__SHIFT 0x10
+#define IH_PERFMON_CNTL__CLEAR1__SHIFT 0x11
+#define IH_PERFMON_CNTL__PERF_SEL1__SHIFT 0x12
+#define IH_PERFMON_CNTL__ENABLE0_MASK 0x00000001L
+#define IH_PERFMON_CNTL__CLEAR0_MASK 0x00000002L
+#define IH_PERFMON_CNTL__PERF_SEL0_MASK 0x00000FFCL
+#define IH_PERFMON_CNTL__ENABLE1_MASK 0x00010000L
+#define IH_PERFMON_CNTL__CLEAR1_MASK 0x00020000L
+#define IH_PERFMON_CNTL__PERF_SEL1_MASK 0x0FFC0000L
+//IH_PERFCOUNTER0_RESULT
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_PERFCOUNTER1_RESULT
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0
+#define IH_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_31_0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_31_0__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_63_32
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_63_32__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_VALUE_BIT_95_64
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_VALUE_BIT_95_64__VALUE_MASK 0xFFFFFFFFL
+//IH_DSM_MATCH_FIELD_CONTROL
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN__SHIFT 0x0
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN__SHIFT 0x1
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN__SHIFT 0x2
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN__SHIFT 0x3
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN__SHIFT 0x4
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN__SHIFT 0x5
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN__SHIFT 0x6
+#define IH_DSM_MATCH_FIELD_CONTROL__SRC_EN_MASK 0x00000001L
+#define IH_DSM_MATCH_FIELD_CONTROL__FCNID_EN_MASK 0x00000002L
+#define IH_DSM_MATCH_FIELD_CONTROL__TIMESTAMP_EN_MASK 0x00000004L
+#define IH_DSM_MATCH_FIELD_CONTROL__RINGID_EN_MASK 0x00000008L
+#define IH_DSM_MATCH_FIELD_CONTROL__VMID_EN_MASK 0x00000010L
+#define IH_DSM_MATCH_FIELD_CONTROL__PASID_EN_MASK 0x00000020L
+#define IH_DSM_MATCH_FIELD_CONTROL__CLIENT_ID_EN_MASK 0x00000040L
+//IH_DSM_MATCH_DATA_CONTROL
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE__SHIFT 0x0
+#define IH_DSM_MATCH_DATA_CONTROL__VALUE_MASK 0x0FFFFFFFL
+//IH_DSM_MATCH_FCN_ID
+#define IH_DSM_MATCH_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_DSM_MATCH_FCN_ID__PF_VF__SHIFT 0x7
+#define IH_DSM_MATCH_FCN_ID__VF_ID_MASK 0x0000000FL
+#define IH_DSM_MATCH_FCN_ID__PF_VF_MASK 0x00000080L
+//IH_VF_RB_STATUS
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+//IH_VF_RB_STATUS2
+#define IH_VF_RB_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_VF_RB1_STATUS
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS__RB_FULL_DRAIN_VF_MASK 0x0000FFFFL
+//IH_VF_RB1_STATUS2
+#define IH_VF_RB1_STATUS2__RB_FULL_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS2__RB_FULL_VF_MASK 0x0000FFFFL
+//IH_RB_STATUS
+#define IH_RB_STATUS__RB_FULL__SHIFT 0x0
+#define IH_RB_STATUS__RB_FULL_DRAIN__SHIFT 0x1
+#define IH_RB_STATUS__RB_OVERFLOW__SHIFT 0x2
+#define IH_RB_STATUS__RB1_FULL__SHIFT 0x4
+#define IH_RB_STATUS__RB1_FULL_DRAIN__SHIFT 0x5
+#define IH_RB_STATUS__RB1_OVERFLOW__SHIFT 0x6
+#define IH_RB_STATUS__RB_FULL_MASK 0x00000001L
+#define IH_RB_STATUS__RB_FULL_DRAIN_MASK 0x00000002L
+#define IH_RB_STATUS__RB_OVERFLOW_MASK 0x00000004L
+#define IH_RB_STATUS__RB1_FULL_MASK 0x00000010L
+#define IH_RB_STATUS__RB1_FULL_DRAIN_MASK 0x00000020L
+#define IH_RB_STATUS__RB1_OVERFLOW_MASK 0x00000040L
+//IH_INT_FLOOD_CNTL
+#define IH_INT_FLOOD_CNTL__HIGHWATER__SHIFT 0x0
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE__SHIFT 0x3
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS__SHIFT 0x4
+#define IH_INT_FLOOD_CNTL__HIGHWATER_MASK 0x00000007L
+#define IH_INT_FLOOD_CNTL__FLOOD_CNTL_ENABLE_MASK 0x00000008L
+#define IH_INT_FLOOD_CNTL__CLEAR_INT_FLOOD_STATUS_MASK 0x00000010L
+//IH_RB0_INT_FLOOD_STATUS
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB0_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_RB1_INT_FLOOD_STATUS
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF__SHIFT 0x0
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED__SHIFT 0x1f
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_VF_MASK 0x0000FFFFL
+#define IH_RB1_INT_FLOOD_STATUS__RB_INT_DROPPED_MASK 0x80000000L
+//IH_INT_FLOOD_STATUS
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT__SHIFT 0x0
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID__SHIFT 0x8
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID__SHIFT 0x10
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID__SHIFT 0x18
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF__SHIFT 0x1d
+#define IH_INT_FLOOD_STATUS__INT_DROPPED__SHIFT 0x1e
+#define IH_INT_FLOOD_STATUS__INT_DROP_CNT_MASK 0x000000FFL
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_CLIENT_ID_MASK 0x0000FF00L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_SOURCE_ID_MASK 0x00FF0000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_ID_MASK 0x0F000000L
+#define IH_INT_FLOOD_STATUS__FIRST_DROP_INT_VF_MASK 0x20000000L
+#define IH_INT_FLOOD_STATUS__INT_DROPPED_MASK 0x40000000L
+//IH_INT_FLAGS
+#define IH_INT_FLAGS__CLIENT_0_FLAG__SHIFT 0x0
+#define IH_INT_FLAGS__CLIENT_1_FLAG__SHIFT 0x1
+#define IH_INT_FLAGS__CLIENT_2_FLAG__SHIFT 0x2
+#define IH_INT_FLAGS__CLIENT_3_FLAG__SHIFT 0x3
+#define IH_INT_FLAGS__CLIENT_4_FLAG__SHIFT 0x4
+#define IH_INT_FLAGS__CLIENT_5_FLAG__SHIFT 0x5
+#define IH_INT_FLAGS__CLIENT_6_FLAG__SHIFT 0x6
+#define IH_INT_FLAGS__CLIENT_7_FLAG__SHIFT 0x7
+#define IH_INT_FLAGS__CLIENT_8_FLAG__SHIFT 0x8
+#define IH_INT_FLAGS__CLIENT_9_FLAG__SHIFT 0x9
+#define IH_INT_FLAGS__CLIENT_10_FLAG__SHIFT 0xa
+#define IH_INT_FLAGS__CLIENT_11_FLAG__SHIFT 0xb
+#define IH_INT_FLAGS__CLIENT_12_FLAG__SHIFT 0xc
+#define IH_INT_FLAGS__CLIENT_13_FLAG__SHIFT 0xd
+#define IH_INT_FLAGS__CLIENT_14_FLAG__SHIFT 0xe
+#define IH_INT_FLAGS__CLIENT_15_FLAG__SHIFT 0xf
+#define IH_INT_FLAGS__CLIENT_16_FLAG__SHIFT 0x10
+#define IH_INT_FLAGS__CLIENT_17_FLAG__SHIFT 0x11
+#define IH_INT_FLAGS__CLIENT_18_FLAG__SHIFT 0x12
+#define IH_INT_FLAGS__CLIENT_19_FLAG__SHIFT 0x13
+#define IH_INT_FLAGS__CLIENT_20_FLAG__SHIFT 0x14
+#define IH_INT_FLAGS__CLIENT_21_FLAG__SHIFT 0x15
+#define IH_INT_FLAGS__CLIENT_22_FLAG__SHIFT 0x16
+#define IH_INT_FLAGS__CLIENT_23_FLAG__SHIFT 0x17
+#define IH_INT_FLAGS__CLIENT_24_FLAG__SHIFT 0x18
+#define IH_INT_FLAGS__CLIENT_25_FLAG__SHIFT 0x19
+#define IH_INT_FLAGS__CLIENT_26_FLAG__SHIFT 0x1a
+#define IH_INT_FLAGS__CLIENT_27_FLAG__SHIFT 0x1b
+#define IH_INT_FLAGS__CLIENT_28_FLAG__SHIFT 0x1c
+#define IH_INT_FLAGS__CLIENT_29_FLAG__SHIFT 0x1d
+#define IH_INT_FLAGS__CLIENT_30_FLAG__SHIFT 0x1e
+#define IH_INT_FLAGS__CLIENT_31_FLAG__SHIFT 0x1f
+#define IH_INT_FLAGS__CLIENT_0_FLAG_MASK 0x00000001L
+#define IH_INT_FLAGS__CLIENT_1_FLAG_MASK 0x00000002L
+#define IH_INT_FLAGS__CLIENT_2_FLAG_MASK 0x00000004L
+#define IH_INT_FLAGS__CLIENT_3_FLAG_MASK 0x00000008L
+#define IH_INT_FLAGS__CLIENT_4_FLAG_MASK 0x00000010L
+#define IH_INT_FLAGS__CLIENT_5_FLAG_MASK 0x00000020L
+#define IH_INT_FLAGS__CLIENT_6_FLAG_MASK 0x00000040L
+#define IH_INT_FLAGS__CLIENT_7_FLAG_MASK 0x00000080L
+#define IH_INT_FLAGS__CLIENT_8_FLAG_MASK 0x00000100L
+#define IH_INT_FLAGS__CLIENT_9_FLAG_MASK 0x00000200L
+#define IH_INT_FLAGS__CLIENT_10_FLAG_MASK 0x00000400L
+#define IH_INT_FLAGS__CLIENT_11_FLAG_MASK 0x00000800L
+#define IH_INT_FLAGS__CLIENT_12_FLAG_MASK 0x00001000L
+#define IH_INT_FLAGS__CLIENT_13_FLAG_MASK 0x00002000L
+#define IH_INT_FLAGS__CLIENT_14_FLAG_MASK 0x00004000L
+#define IH_INT_FLAGS__CLIENT_15_FLAG_MASK 0x00008000L
+#define IH_INT_FLAGS__CLIENT_16_FLAG_MASK 0x00010000L
+#define IH_INT_FLAGS__CLIENT_17_FLAG_MASK 0x00020000L
+#define IH_INT_FLAGS__CLIENT_18_FLAG_MASK 0x00040000L
+#define IH_INT_FLAGS__CLIENT_19_FLAG_MASK 0x00080000L
+#define IH_INT_FLAGS__CLIENT_20_FLAG_MASK 0x00100000L
+#define IH_INT_FLAGS__CLIENT_21_FLAG_MASK 0x00200000L
+#define IH_INT_FLAGS__CLIENT_22_FLAG_MASK 0x00400000L
+#define IH_INT_FLAGS__CLIENT_23_FLAG_MASK 0x00800000L
+#define IH_INT_FLAGS__CLIENT_24_FLAG_MASK 0x01000000L
+#define IH_INT_FLAGS__CLIENT_25_FLAG_MASK 0x02000000L
+#define IH_INT_FLAGS__CLIENT_26_FLAG_MASK 0x04000000L
+#define IH_INT_FLAGS__CLIENT_27_FLAG_MASK 0x08000000L
+#define IH_INT_FLAGS__CLIENT_28_FLAG_MASK 0x10000000L
+#define IH_INT_FLAGS__CLIENT_29_FLAG_MASK 0x20000000L
+#define IH_INT_FLAGS__CLIENT_30_FLAG_MASK 0x40000000L
+#define IH_INT_FLAGS__CLIENT_31_FLAG_MASK 0x80000000L
+//IH_LAST_INT_INFO0
+#define IH_LAST_INT_INFO0__CLIENT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO0__SOURCE_ID__SHIFT 0x8
+#define IH_LAST_INT_INFO0__RING_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO0__VM_ID__SHIFT 0x18
+#define IH_LAST_INT_INFO0__VMID_TYPE__SHIFT 0x1f
+#define IH_LAST_INT_INFO0__CLIENT_ID_MASK 0x000000FFL
+#define IH_LAST_INT_INFO0__SOURCE_ID_MASK 0x0000FF00L
+#define IH_LAST_INT_INFO0__RING_ID_MASK 0x00FF0000L
+#define IH_LAST_INT_INFO0__VM_ID_MASK 0x0F000000L
+#define IH_LAST_INT_INFO0__VMID_TYPE_MASK 0x80000000L
+//IH_LAST_INT_INFO1
+#define IH_LAST_INT_INFO1__CONTEXT_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//IH_LAST_INT_INFO2
+#define IH_LAST_INT_INFO2__PAS_ID__SHIFT 0x0
+#define IH_LAST_INT_INFO2__VF_ID__SHIFT 0x10
+#define IH_LAST_INT_INFO2__VF__SHIFT 0x17
+#define IH_LAST_INT_INFO2__PAS_ID_MASK 0x0000FFFFL
+#define IH_LAST_INT_INFO2__VF_ID_MASK 0x000F0000L
+#define IH_LAST_INT_INFO2__VF_MASK 0x00800000L
+//IH_SCRATCH
+#define IH_SCRATCH__DATA__SHIFT 0x0
+#define IH_SCRATCH__DATA_MASK 0xFFFFFFFFL
+//IH_CLIENT_CREDIT_ERROR
+#define IH_CLIENT_CREDIT_ERROR__CLEAR__SHIFT 0x0
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR__SHIFT 0x1
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR__SHIFT 0x2
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR__SHIFT 0x3
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR__SHIFT 0x4
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR__SHIFT 0x5
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR__SHIFT 0x6
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR__SHIFT 0x7
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR__SHIFT 0x8
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR__SHIFT 0x9
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR__SHIFT 0xa
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR__SHIFT 0xb
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR__SHIFT 0xc
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR__SHIFT 0xd
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR__SHIFT 0xe
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR__SHIFT 0xf
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR__SHIFT 0x10
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR__SHIFT 0x11
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR__SHIFT 0x12
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR__SHIFT 0x13
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR__SHIFT 0x14
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR__SHIFT 0x15
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR__SHIFT 0x16
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR__SHIFT 0x17
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR__SHIFT 0x18
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR__SHIFT 0x19
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR__SHIFT 0x1a
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR__SHIFT 0x1b
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR__SHIFT 0x1c
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR__SHIFT 0x1d
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR__SHIFT 0x1e
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR__SHIFT 0x1f
+#define IH_CLIENT_CREDIT_ERROR__CLEAR_MASK 0x00000001L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_1_ERROR_MASK 0x00000002L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_2_ERROR_MASK 0x00000004L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_3_ERROR_MASK 0x00000008L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_4_ERROR_MASK 0x00000010L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_5_ERROR_MASK 0x00000020L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_6_ERROR_MASK 0x00000040L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_7_ERROR_MASK 0x00000080L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_8_ERROR_MASK 0x00000100L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_9_ERROR_MASK 0x00000200L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_10_ERROR_MASK 0x00000400L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_11_ERROR_MASK 0x00000800L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_12_ERROR_MASK 0x00001000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_13_ERROR_MASK 0x00002000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_14_ERROR_MASK 0x00004000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_15_ERROR_MASK 0x00008000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_16_ERROR_MASK 0x00010000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_17_ERROR_MASK 0x00020000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_18_ERROR_MASK 0x00040000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_19_ERROR_MASK 0x00080000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_20_ERROR_MASK 0x00100000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_21_ERROR_MASK 0x00200000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_22_ERROR_MASK 0x00400000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_23_ERROR_MASK 0x00800000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_24_ERROR_MASK 0x01000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_25_ERROR_MASK 0x02000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_26_ERROR_MASK 0x04000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_27_ERROR_MASK 0x08000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_28_ERROR_MASK 0x10000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_29_ERROR_MASK 0x20000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_30_ERROR_MASK 0x40000000L
+#define IH_CLIENT_CREDIT_ERROR__CLIENT_31_ERROR_MASK 0x80000000L
+//IH_GPU_IOV_VIOLATION_LOG
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE__SHIFT 0x16
+#define IH_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x17
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID__SHIFT 0x18
+#define IH_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define IH_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define IH_GPU_IOV_VIOLATION_LOG__OPCODE_MASK 0x00400000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00800000L
+#define IH_GPU_IOV_VIOLATION_LOG__VF_ID_MASK 0x0F000000L
+//IH_GPU_IOV_VIOLATION_LOG2
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define IH_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//IH_COOKIE_REC_VIOLATION_LOG
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID__SHIFT 0x8
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x10
+#define IH_COOKIE_REC_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define IH_COOKIE_REC_VIOLATION_LOG__CLIENT_ID_MASK 0x0000FF00L
+#define IH_COOKIE_REC_VIOLATION_LOG__INITIATOR_ID_MASK 0x03FF0000L
+//IH_CREDIT_STATUS
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED__SHIFT 0x1
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED__SHIFT 0x2
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED__SHIFT 0x3
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED__SHIFT 0x4
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED__SHIFT 0x5
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED__SHIFT 0x6
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED__SHIFT 0x7
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED__SHIFT 0x8
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED__SHIFT 0x9
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED__SHIFT 0xa
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED__SHIFT 0xb
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED__SHIFT 0xc
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED__SHIFT 0xd
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED__SHIFT 0xe
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED__SHIFT 0xf
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED__SHIFT 0x10
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED__SHIFT 0x11
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED__SHIFT 0x12
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED__SHIFT 0x13
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED__SHIFT 0x14
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED__SHIFT 0x15
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED__SHIFT 0x16
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED__SHIFT 0x17
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED__SHIFT 0x18
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED__SHIFT 0x19
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED__SHIFT 0x1a
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED__SHIFT 0x1b
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED__SHIFT 0x1c
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED__SHIFT 0x1d
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED__SHIFT 0x1e
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED__SHIFT 0x1f
+#define IH_CREDIT_STATUS__CLIENT_1_CREDIT_RETURNED_MASK 0x00000002L
+#define IH_CREDIT_STATUS__CLIENT_2_CREDIT_RETURNED_MASK 0x00000004L
+#define IH_CREDIT_STATUS__CLIENT_3_CREDIT_RETURNED_MASK 0x00000008L
+#define IH_CREDIT_STATUS__CLIENT_4_CREDIT_RETURNED_MASK 0x00000010L
+#define IH_CREDIT_STATUS__CLIENT_5_CREDIT_RETURNED_MASK 0x00000020L
+#define IH_CREDIT_STATUS__CLIENT_6_CREDIT_RETURNED_MASK 0x00000040L
+#define IH_CREDIT_STATUS__CLIENT_7_CREDIT_RETURNED_MASK 0x00000080L
+#define IH_CREDIT_STATUS__CLIENT_8_CREDIT_RETURNED_MASK 0x00000100L
+#define IH_CREDIT_STATUS__CLIENT_9_CREDIT_RETURNED_MASK 0x00000200L
+#define IH_CREDIT_STATUS__CLIENT_10_CREDIT_RETURNED_MASK 0x00000400L
+#define IH_CREDIT_STATUS__CLIENT_11_CREDIT_RETURNED_MASK 0x00000800L
+#define IH_CREDIT_STATUS__CLIENT_12_CREDIT_RETURNED_MASK 0x00001000L
+#define IH_CREDIT_STATUS__CLIENT_13_CREDIT_RETURNED_MASK 0x00002000L
+#define IH_CREDIT_STATUS__CLIENT_14_CREDIT_RETURNED_MASK 0x00004000L
+#define IH_CREDIT_STATUS__CLIENT_15_CREDIT_RETURNED_MASK 0x00008000L
+#define IH_CREDIT_STATUS__CLIENT_16_CREDIT_RETURNED_MASK 0x00010000L
+#define IH_CREDIT_STATUS__CLIENT_17_CREDIT_RETURNED_MASK 0x00020000L
+#define IH_CREDIT_STATUS__CLIENT_18_CREDIT_RETURNED_MASK 0x00040000L
+#define IH_CREDIT_STATUS__CLIENT_19_CREDIT_RETURNED_MASK 0x00080000L
+#define IH_CREDIT_STATUS__CLIENT_20_CREDIT_RETURNED_MASK 0x00100000L
+#define IH_CREDIT_STATUS__CLIENT_21_CREDIT_RETURNED_MASK 0x00200000L
+#define IH_CREDIT_STATUS__CLIENT_22_CREDIT_RETURNED_MASK 0x00400000L
+#define IH_CREDIT_STATUS__CLIENT_23_CREDIT_RETURNED_MASK 0x00800000L
+#define IH_CREDIT_STATUS__CLIENT_24_CREDIT_RETURNED_MASK 0x01000000L
+#define IH_CREDIT_STATUS__CLIENT_25_CREDIT_RETURNED_MASK 0x02000000L
+#define IH_CREDIT_STATUS__CLIENT_26_CREDIT_RETURNED_MASK 0x04000000L
+#define IH_CREDIT_STATUS__CLIENT_27_CREDIT_RETURNED_MASK 0x08000000L
+#define IH_CREDIT_STATUS__CLIENT_28_CREDIT_RETURNED_MASK 0x10000000L
+#define IH_CREDIT_STATUS__CLIENT_29_CREDIT_RETURNED_MASK 0x20000000L
+#define IH_CREDIT_STATUS__CLIENT_30_CREDIT_RETURNED_MASK 0x40000000L
+#define IH_CREDIT_STATUS__CLIENT_31_CREDIT_RETURNED_MASK 0x80000000L
+//IH_MMHUB_ERROR
+#define IH_MMHUB_ERROR__IH_BRESP_01__SHIFT 0x1
+#define IH_MMHUB_ERROR__IH_BRESP_10__SHIFT 0x2
+#define IH_MMHUB_ERROR__IH_BRESP_11__SHIFT 0x3
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01__SHIFT 0x5
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10__SHIFT 0x6
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11__SHIFT 0x7
+#define IH_MMHUB_ERROR__IH_BRESP_01_MASK 0x00000002L
+#define IH_MMHUB_ERROR__IH_BRESP_10_MASK 0x00000004L
+#define IH_MMHUB_ERROR__IH_BRESP_11_MASK 0x00000008L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_01_MASK 0x00000020L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_10_MASK 0x00000040L
+#define IH_MMHUB_ERROR__IH_BUSER_NACK_11_MASK 0x00000080L
+//IH_VF_RB_STATUS3
+#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS3__RB_OVERFLOW_VF_MASK 0x0000FFFFL
+//IH_VF_RB_STATUS4
+#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF__SHIFT 0x0
+#define IH_VF_RB_STATUS4__BIF_INTERRUPT_LINE_VF_MASK 0x0000FFFFL
+//IH_VF_RB1_STATUS3
+#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF__SHIFT 0x0
+#define IH_VF_RB1_STATUS3__RB_OVERFLOW_VF_MASK 0x0000FFFFL
+//IH_MSI_STORM_CTRL
+#define IH_MSI_STORM_CTRL__DELAY__SHIFT 0x0
+#define IH_MSI_STORM_CTRL__DELAY_MASK 0x00000FFFL
+//IH_MSI_STORM_CLIENT_INDEX
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX__SHIFT 0x0
+#define IH_MSI_STORM_CLIENT_INDEX__INDEX_MASK 0x00000007L
+//IH_MSI_STORM_CLIENT_DATA
+#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE__SHIFT 0x11
+#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID__SHIFT 0x1f
+#define IH_MSI_STORM_CLIENT_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_MSI_STORM_CLIENT_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
+#define IH_MSI_STORM_CLIENT_DATA__UTCL2_PAGE_FAULT_MATCH_ENABLE_MASK 0x00020000L
+#define IH_MSI_STORM_CLIENT_DATA__ENTRY_VALID_MASK 0x80000000L
+//IH_REGISTER_LAST_PART2
+#define IH_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//SEM_MAILBOX
+#define SEM_MAILBOX__HOSTPORT__SHIFT 0x0
+#define SEM_MAILBOX__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX__HOSTPORT_MASK 0x0000FFFFL
+#define SEM_MAILBOX__RESERVED_MASK 0xFFFF0000L
+//SEM_MAILBOX_CLEAR
+#define SEM_MAILBOX_CLEAR__CLEAR__SHIFT 0x0
+#define SEM_MAILBOX_CLEAR__RESERVED__SHIFT 0x10
+#define SEM_MAILBOX_CLEAR__CLEAR_MASK 0x0000FFFFL
+#define SEM_MAILBOX_CLEAR__RESERVED_MASK 0xFFFF0000L
+//SEM_REGISTER_LAST_PART2
+#define SEM_REGISTER_LAST_PART2__RESERVED__SHIFT 0x0
+#define SEM_REGISTER_LAST_PART2__RESERVED_MASK 0xFFFFFFFFL
+//IH_ACTIVE_FCN_ID
+#define IH_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define IH_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define IH_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define IH_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define IH_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define IH_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//IH_VIRT_RESET_REQ
+#define IH_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define IH_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define IH_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define IH_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//IH_CLIENT_CFG
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0
+#define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL
+//IH_RING1_CLIENT_CFG_INDEX
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L
+//IH_RING1_CLIENT_CFG_DATA
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10
+#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L
+#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L
+//IH_CLIENT_CFG_INDEX
+#define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0
+#define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+//IH_CLIENT_CFG_DATA
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE__SHIFT 0x12
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT__SHIFT 0x16
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID__SHIFT 0x18
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE__SHIFT 0x19
+#define IH_CLIENT_CFG_DATA__CLIENT_TYPE_MASK 0x000C0000L
+#define IH_CLIENT_CFG_DATA__VF_RB_SELECT_MASK 0x00C00000L
+#define IH_CLIENT_CFG_DATA__OVERWRITE_RING_ID_WITH_ACTIVE_FCN_ID_MASK 0x01000000L
+#define IH_CLIENT_CFG_DATA__INTERFACE_TYPE_MASK 0x02000000L
+//IH_CLIENT_CFG_DATA2
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR__SHIFT 0x0
+#define IH_CLIENT_CFG_DATA2__CREDIT_RETURN_ADDR_MASK 0xFFFFFFFFL
+//IH_CID_REMAP_INDEX
+#define IH_CID_REMAP_INDEX__INDEX__SHIFT 0x0
+#define IH_CID_REMAP_INDEX__INDEX_MASK 0x00000003L
+//IH_CID_REMAP_DATA
+#define IH_CID_REMAP_DATA__CLIENT_ID__SHIFT 0x0
+#define IH_CID_REMAP_DATA__INITIATOR_ID__SHIFT 0x8
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP__SHIFT 0x18
+#define IH_CID_REMAP_DATA__CLIENT_ID_MASK 0x000000FFL
+#define IH_CID_REMAP_DATA__INITIATOR_ID_MASK 0x0003FF00L
+#define IH_CID_REMAP_DATA__CLIENT_ID_REMAP_MASK 0xFF000000L
+//IH_CHICKEN
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE__SHIFT 0x0
+#define IH_CHICKEN__DBGU_TRIGGER_ENABLE__SHIFT 0x1
+#define IH_CHICKEN__CROSS_TRIGGER_ENABLE__SHIFT 0x2
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE__SHIFT 0x3
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE__SHIFT 0x4
+#define IH_CHICKEN__REG_FIREWALL_ENABLE__SHIFT 0x5
+#define IH_CHICKEN__ACTIVE_FCN_ID_PROT_ENABLE_MASK 0x00000001L
+#define IH_CHICKEN__DBGU_TRIGGER_ENABLE_MASK 0x00000002L
+#define IH_CHICKEN__CROSS_TRIGGER_ENABLE_MASK 0x00000004L
+#define IH_CHICKEN__MC_SPACE_FBPA_ENABLE_MASK 0x00000008L
+#define IH_CHICKEN__MC_SPACE_GPA_ENABLE_MASK 0x00000010L
+#define IH_CHICKEN__REG_FIREWALL_ENABLE_MASK 0x00000020L
+//IH_MMHUB_CNTL
+#define IH_MMHUB_CNTL__UNITID__SHIFT 0x0
+#define IH_MMHUB_CNTL__IV_TLVL__SHIFT 0x8
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL__SHIFT 0xc
+#define IH_MMHUB_CNTL__UNITID_MASK 0x0000003FL
+#define IH_MMHUB_CNTL__IV_TLVL_MASK 0x00000F00L
+#define IH_MMHUB_CNTL__WPTR_WB_TLVL_MASK 0x0000F000L
+//IH_INT_DROP_CNTL
+#define IH_INT_DROP_CNTL__INT_DROP_EN__SHIFT 0x0
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN__SHIFT 0x1
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN__SHIFT 0x2
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN__SHIFT 0x3
+#define IH_INT_DROP_CNTL__VF_MATCH_EN__SHIFT 0x4
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN__SHIFT 0x5
+#define IH_INT_DROP_CNTL__INT_DROP_MODE__SHIFT 0x6
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN__SHIFT 0x8
+#define IH_INT_DROP_CNTL__INT_DROPPED__SHIFT 0x10
+#define IH_INT_DROP_CNTL__INT_DROP_EN_MASK 0x00000001L
+#define IH_INT_DROP_CNTL__CLIENT_ID_MATCH_EN_MASK 0x00000002L
+#define IH_INT_DROP_CNTL__SOURCE_ID_MATCH_EN_MASK 0x00000004L
+#define IH_INT_DROP_CNTL__VF_ID_MATCH_EN_MASK 0x00000008L
+#define IH_INT_DROP_CNTL__VF_MATCH_EN_MASK 0x00000010L
+#define IH_INT_DROP_CNTL__CONTEXT_ID_MATCH_EN_MASK 0x00000020L
+#define IH_INT_DROP_CNTL__INT_DROP_MODE_MASK 0x000000C0L
+#define IH_INT_DROP_CNTL__UTCL2_RETRY_INT_DROP_EN_MASK 0x00000100L
+#define IH_INT_DROP_CNTL__INT_DROPPED_MASK 0x00010000L
+//IH_INT_DROP_MATCH_VALUE0
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE__SHIFT 0x8
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE__SHIFT 0x10
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE__SHIFT 0x17
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE__SHIFT 0x18
+#define IH_INT_DROP_MATCH_VALUE0__CLIENT_ID_MATCH_VALUE_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_VALUE0__SOURCE_ID_MATCH_VALUE_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_VALUE0__VF_ID_MATCH_VALUE_MASK 0x001F0000L
+#define IH_INT_DROP_MATCH_VALUE0__VF_MATCH_VALUE_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_VALUE0__CONTEXT_ID_39_32_MATCH_VALUE_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_VALUE1
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE__SHIFT 0x0
+#define IH_INT_DROP_MATCH_VALUE1__CONTEXT_ID_31_0_MATCH_VALUE_MASK 0xFFFFFFFFL
+//IH_INT_DROP_MATCH_MASK0
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK__SHIFT 0x8
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK__SHIFT 0x10
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK__SHIFT 0x17
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK__SHIFT 0x18
+#define IH_INT_DROP_MATCH_MASK0__CLIENT_ID_MATCH_MASK_MASK 0x000000FFL
+#define IH_INT_DROP_MATCH_MASK0__SOURCE_ID_MATCH_MASK_MASK 0x0000FF00L
+#define IH_INT_DROP_MATCH_MASK0__VF_ID_MATCH_MASK_MASK 0x001F0000L
+#define IH_INT_DROP_MATCH_MASK0__VF_MATCH_MASK_MASK 0x00800000L
+#define IH_INT_DROP_MATCH_MASK0__CONTEXT_ID_39_32_MATCH_MASK_MASK 0xFF000000L
+//IH_INT_DROP_MATCH_MASK1
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK__SHIFT 0x0
+#define IH_INT_DROP_MATCH_MASK1__CONTEXT_ID_31_0_MATCH_MASK_MASK 0xFFFFFFFFL
+//IH_REGISTER_LAST_PART1
+#define IH_REGISTER_LAST_PART1__RESERVED__SHIFT 0x0
+#define IH_REGISTER_LAST_PART1__RESERVED_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
index 31bef0776ded..ead81aeffd67 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h
@@ -211,6 +211,10 @@
#define regSDMA_RAS_STATUS_BASE_IDX 0
#define regSDMA_CLK_STATUS 0x0068
#define regSDMA_CLK_STATUS_BASE_IDX 0
+#define regSDMA_UE_ERR_STATUS_LO 0x0069
+#define regSDMA_UE_ERR_STATUS_LO_BASE_IDX 0
+#define regSDMA_UE_ERR_STATUS_HI 0x006a
+#define regSDMA_UE_ERR_STATUS_HI_BASE_IDX 0
#define regSDMA_POWER_CNTL 0x006b
#define regSDMA_POWER_CNTL_BASE_IDX 0
#define regSDMA_CLK_CTRL 0x006c
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
index e46cb3339355..290953bdf1d6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h
@@ -1171,6 +1171,30 @@
#define SDMA_CLK_STATUS__F32_CLK_MASK 0x00000008L
#define SDMA_CLK_STATUS__CE_CLK_MASK 0x00000010L
#define SDMA_CLK_STATUS__PERF_CLK_MASK 0x00000020L
+//SDMA_UE_ERR_STATUS_LO
+#define SDMA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define SDMA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG__SHIFT 0x1
+#define SDMA_UE_ERR_STATUS_LO__ADDRESS__SHIFT 0x2
+#define SDMA_UE_ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define SDMA_UE_ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define SDMA_UE_ERR_STATUS_LO__ADDRESS_VALID_FLAG_MASK 0x00000002L
+#define SDMA_UE_ERR_STATUS_LO__ADDRESS_MASK 0x00FFFFFCL
+#define SDMA_UE_ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+//SDMA_UE_ERR_STATUS_HI
+#define SDMA_UE_ERR_STATUS_HI__ECC__SHIFT 0x0
+#define SDMA_UE_ERR_STATUS_HI__PARITY__SHIFT 0x1
+#define SDMA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define SDMA_UE_ERR_STATUS_HI__ERR_INFO__SHIFT 0x3
+#define SDMA_UE_ERR_STATUS_HI__UE_CNT__SHIFT 0x17
+#define SDMA_UE_ERR_STATUS_HI__FED_CNT__SHIFT 0x1a
+#define SDMA_UE_ERR_STATUS_HI__RESERVED__SHIFT 0x1d
+#define SDMA_UE_ERR_STATUS_HI__ECC_MASK 0x00000001L
+#define SDMA_UE_ERR_STATUS_HI__PARITY_MASK 0x00000002L
+#define SDMA_UE_ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define SDMA_UE_ERR_STATUS_HI__ERR_INFO_MASK 0x007FFFF8L
+#define SDMA_UE_ERR_STATUS_HI__UE_CNT_MASK 0x03800000L
+#define SDMA_UE_ERR_STATUS_HI__FED_CNT_MASK 0x1C000000L
+#define SDMA_UE_ERR_STATUS_HI__RESERVED_MASK 0xE0000000L
//SDMA_POWER_CNTL
#define SDMA_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0
#define SDMA_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h
new file mode 100644
index 000000000000..b62b489402c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _smuio_13_0_3_OFFSET_HEADER
+#define _smuio_13_0_3_OFFSET_HEADER
+
+
+
+// addressBlock: aid_smuio_smuio_reset_SmuSmuioDec
+// base address: 0x5a300
+#define regSMUIO_MP_RESET_INTR 0x00c1
+#define regSMUIO_MP_RESET_INTR_BASE_IDX 1
+#define regSMUIO_SOC_HALT 0x00c2
+#define regSMUIO_SOC_HALT_BASE_IDX 1
+
+
+// addressBlock: aid_smuio_smuio_tsc_SmuSmuioDec
+// base address: 0x5a8a0
+#define regPWROK_REFCLK_GAP_CYCLES 0x0028
+#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 2
+#define regGOLDEN_TSC_INCREMENT_UPPER 0x002b
+#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 2
+#define regGOLDEN_TSC_INCREMENT_LOWER 0x002c
+#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 2
+#define regGOLDEN_TSC_COUNT_UPPER 0x002d
+#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 2
+#define regGOLDEN_TSC_COUNT_LOWER 0x002e
+#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 2
+#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x002f
+#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 2
+#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0030
+#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 2
+#define regSOC_GAP_PWROK 0x0031
+#define regSOC_GAP_PWROK_BASE_IDX 2
+
+
+// addressBlock: aid_smuio_smuio_swtimer_SmuSmuioDec
+// base address: 0x5ac70
+#define regPWR_VIRT_RESET_REQ 0x011c
+#define regPWR_VIRT_RESET_REQ_BASE_IDX 2
+#define regPWR_DISP_TIMER_CONTROL 0x011d
+#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 2
+#define regPWR_DISP_TIMER_DEBUG 0x011e
+#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 2
+#define regPWR_DISP_TIMER2_CONTROL 0x011f
+#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 2
+#define regPWR_DISP_TIMER2_DEBUG 0x0120
+#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 2
+#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x0121
+#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 2
+#define regPWR_IH_CONTROL 0x0122
+#define regPWR_IH_CONTROL_BASE_IDX 2
+
+
+// addressBlock: aid_smuio_smuio_misc_SmuSmuioDec
+// base address: 0x5a000
+#define regSMUIO_MCM_CONFIG 0x0023
+#define regSMUIO_MCM_CONFIG_BASE_IDX 1
+#define regIP_DISCOVERY_VERSION 0x0000
+#define regIP_DISCOVERY_VERSION_BASE_IDX 2
+#define regSCRATCH_REGISTER0 0x01bd
+#define regSCRATCH_REGISTER0_BASE_IDX 2
+#define regSCRATCH_REGISTER1 0x01be
+#define regSCRATCH_REGISTER1_BASE_IDX 2
+#define regSCRATCH_REGISTER2 0x01bf
+#define regSCRATCH_REGISTER2_BASE_IDX 2
+#define regSCRATCH_REGISTER3 0x01c0
+#define regSCRATCH_REGISTER3_BASE_IDX 2
+#define regSCRATCH_REGISTER4 0x01c1
+#define regSCRATCH_REGISTER4_BASE_IDX 2
+#define regSCRATCH_REGISTER5 0x01c2
+#define regSCRATCH_REGISTER5_BASE_IDX 2
+#define regSCRATCH_REGISTER6 0x01c3
+#define regSCRATCH_REGISTER6_BASE_IDX 2
+#define regSCRATCH_REGISTER7 0x01c4
+#define regSCRATCH_REGISTER7_BASE_IDX 2
+
+
+// addressBlock: aid_smuio_smuio_gpio_SmuSmuioDec
+// base address: 0x5a500
+#define regSMU_GPIOPAD_SW_INT_STAT 0x0140
+#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 1
+#define regSMU_GPIOPAD_MASK 0x0141
+#define regSMU_GPIOPAD_MASK_BASE_IDX 1
+#define regSMU_GPIOPAD_A 0x0142
+#define regSMU_GPIOPAD_A_BASE_IDX 1
+#define regSMU_GPIOPAD_TXIMPSEL 0x0143
+#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 1
+#define regSMU_GPIOPAD_EN 0x0144
+#define regSMU_GPIOPAD_EN_BASE_IDX 1
+#define regSMU_GPIOPAD_Y 0x0145
+#define regSMU_GPIOPAD_Y_BASE_IDX 1
+#define regSMU_GPIOPAD_RXEN 0x0146
+#define regSMU_GPIOPAD_RXEN_BASE_IDX 1
+#define regSMU_GPIOPAD_RCVR_SEL0 0x0147
+#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 1
+#define regSMU_GPIOPAD_RCVR_SEL1 0x0148
+#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 1
+#define regSMU_GPIOPAD_PU_EN 0x0149
+#define regSMU_GPIOPAD_PU_EN_BASE_IDX 1
+#define regSMU_GPIOPAD_PD_EN 0x014a
+#define regSMU_GPIOPAD_PD_EN_BASE_IDX 1
+#define regSMU_GPIOPAD_PINSTRAPS 0x014b
+#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 1
+#define regDFT_PINSTRAPS 0x014c
+#define regDFT_PINSTRAPS_BASE_IDX 1
+#define regSMU_GPIOPAD_INT_STAT_EN 0x014d
+#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 1
+#define regSMU_GPIOPAD_INT_STAT 0x014e
+#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 1
+#define regSMU_GPIOPAD_INT_STAT_AK 0x014f
+#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 1
+#define regSMU_GPIOPAD_INT_EN 0x0150
+#define regSMU_GPIOPAD_INT_EN_BASE_IDX 1
+#define regSMU_GPIOPAD_INT_TYPE 0x0151
+#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 1
+#define regSMU_GPIOPAD_INT_POLARITY 0x0152
+#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 1
+#define regSMUIO_PCC_GPIO_SELECT 0x0155
+#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 1
+#define regSMU_GPIOPAD_S0 0x0156
+#define regSMU_GPIOPAD_S0_BASE_IDX 1
+#define regSMU_GPIOPAD_S1 0x0157
+#define regSMU_GPIOPAD_S1_BASE_IDX 1
+#define regSMU_GPIOPAD_SCHMEN 0x0158
+#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 1
+#define regSMU_GPIOPAD_SCL_EN 0x0159
+#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 1
+#define regSMU_GPIOPAD_SDA_EN 0x015a
+#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 1
+#define regSMUIO_GPIO_INT0_SELECT 0x015b
+#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 1
+#define regSMUIO_GPIO_INT1_SELECT 0x015c
+#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 1
+#define regSMUIO_GPIO_INT2_SELECT 0x015d
+#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 1
+#define regSMUIO_GPIO_INT3_SELECT 0x015e
+#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 1
+#define regSMU_GPIOPAD_MP_INT0_STAT 0x015f
+#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 1
+#define regSMU_GPIOPAD_MP_INT1_STAT 0x0160
+#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 1
+#define regSMU_GPIOPAD_MP_INT2_STAT 0x0161
+#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 1
+#define regSMU_GPIOPAD_MP_INT3_STAT 0x0162
+#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 1
+#define regSMIO_INDEX 0x0163
+#define regSMIO_INDEX_BASE_IDX 1
+#define regS0_VID_SMIO_CNTL 0x0164
+#define regS0_VID_SMIO_CNTL_BASE_IDX 1
+#define regS1_VID_SMIO_CNTL 0x0165
+#define regS1_VID_SMIO_CNTL_BASE_IDX 1
+#define regOPEN_DRAIN_SELECT 0x0166
+#define regOPEN_DRAIN_SELECT_BASE_IDX 1
+#define regSMIO_ENABLE 0x0167
+#define regSMIO_ENABLE_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h
new file mode 100644
index 000000000000..be896f3089fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _smuio_13_0_3_SH_MASK_HEADER
+#define _smuio_13_0_3_SH_MASK_HEADER
+
+
+// addressBlock: aid_smuio_smuio_reset_SmuSmuioDec
+//SMUIO_MP_RESET_INTR
+#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
+#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
+//SMUIO_SOC_HALT
+#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2
+#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3
+#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L
+#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L
+
+
+// addressBlock: aid_smuio_smuio_tsc_SmuSmuioDec
+//PWROK_REFCLK_GAP_CYCLES
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
+//GOLDEN_TSC_INCREMENT_UPPER
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_INCREMENT_LOWER
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
+//GOLDEN_TSC_COUNT_UPPER
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_COUNT_LOWER
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_UPPER
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_LOWER
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GAP_PWROK
+#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
+#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L
+
+
+// addressBlock: aid_smuio_smuio_swtimer_SmuSmuioDec
+//PWR_VIRT_RESET_REQ
+#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f
+#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L
+//PWR_DISP_TIMER_CONTROL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER_DEBUG
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
+//PWR_DISP_TIMER2_CONTROL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER2_DEBUG
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L
+//PWR_DISP_TIMER_GLOBAL_CONTROL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
+//PWR_IH_CONTROL
+#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
+#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L
+
+
+// addressBlock: aid_smuio_smuio_misc_SmuSmuioDec
+//SMUIO_MCM_CONFIG
+#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
+#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
+#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc
+#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
+#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
+#define SMUIO_MCM_CONFIG__TOPOLOGY_ID__SHIFT 0x12
+#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
+#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000003CL
+#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000F00L
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L
+#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
+#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
+#define SMUIO_MCM_CONFIG__TOPOLOGY_ID_MASK 0x007C0000L
+//IP_DISCOVERY_VERSION
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER0
+#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
+#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER1
+#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
+#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER2
+#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
+#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER3
+#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
+#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER4
+#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
+#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER5
+#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
+#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER6
+#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
+#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER7
+#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
+#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_smuio_smuio_gpio_SmuSmuioDec
+//SMU_GPIOPAD_SW_INT_STAT
+#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L
+//SMU_GPIOPAD_MASK
+#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_A
+#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_TXIMPSEL
+#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0
+#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_EN
+#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_Y
+#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RXEN
+#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0
+#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RCVR_SEL0
+#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0
+#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_RCVR_SEL1
+#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0
+#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PU_EN
+#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PD_EN
+#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_PINSTRAPS
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L
+#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L
+//DFT_PINSTRAPS
+#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0
+#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000003FFL
+//SMU_GPIOPAD_INT_STAT_EN
+#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L
+//SMU_GPIOPAD_INT_STAT
+#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L
+//SMU_GPIOPAD_INT_STAT_AK
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L
+#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L
+#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L
+//SMU_GPIOPAD_INT_EN
+#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L
+//SMU_GPIOPAD_INT_TYPE
+#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L
+//SMU_GPIOPAD_INT_POLARITY
+#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL
+#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L
+//SMUIO_PCC_GPIO_SELECT
+#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL
+//SMU_GPIOPAD_S0
+#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0
+#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_S1
+#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0
+#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SCHMEN
+#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0
+#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SCL_EN
+#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0
+#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL
+//SMU_GPIOPAD_SDA_EN
+#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0
+#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL
+//SMUIO_GPIO_INT0_SELECT
+#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT1_SELECT
+#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT2_SELECT
+#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL
+//SMUIO_GPIO_INT3_SELECT
+#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0
+#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL
+//SMU_GPIOPAD_MP_INT0_STAT
+#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT1_STAT
+#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT2_STAT
+#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL
+//SMU_GPIOPAD_MP_INT3_STAT
+#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0
+#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL
+//SMIO_INDEX
+#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0
+#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L
+//S0_VID_SMIO_CNTL
+#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0
+#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL
+//S1_VID_SMIO_CNTL
+#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0
+#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL
+//OPEN_DRAIN_SELECT
+#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0
+#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f
+#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL
+#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L
+//SMIO_ENABLE
+#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0
+#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
new file mode 100644
index 000000000000..e9742d10de1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h
@@ -0,0 +1,2332 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_4_0_3_OFFSET_HEADER
+#define _vcn_4_0_3_OFFSET_HEADER
+
+
+
+// addressBlock: aid_uvd0_uvddec
+// base address: 0x1fb00
+#define regUVD_TOP_CTRL 0x00c0
+#define regUVD_TOP_CTRL_BASE_IDX 1
+#define regUVD_CGC_GATE 0x00c1
+#define regUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_CGC_CTRL 0x00c2
+#define regUVD_CGC_CTRL_BASE_IDX 1
+#define regAVM_SUVD_CGC_GATE 0x00c4
+#define regAVM_SUVD_CGC_GATE_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_GATE 0x00c4
+#define regCDEFE_SUVD_CGC_GATE_BASE_IDX 1
+#define regEFC_SUVD_CGC_GATE 0x00c4
+#define regEFC_SUVD_CGC_GATE_BASE_IDX 1
+#define regENT_SUVD_CGC_GATE 0x00c4
+#define regENT_SUVD_CGC_GATE_BASE_IDX 1
+#define regIME_SUVD_CGC_GATE 0x00c4
+#define regIME_SUVD_CGC_GATE_BASE_IDX 1
+#define regPPU_SUVD_CGC_GATE 0x00c4
+#define regPPU_SUVD_CGC_GATE_BASE_IDX 1
+#define regSAOE_SUVD_CGC_GATE 0x00c4
+#define regSAOE_SUVD_CGC_GATE_BASE_IDX 1
+#define regSCM_SUVD_CGC_GATE 0x00c4
+#define regSCM_SUVD_CGC_GATE_BASE_IDX 1
+#define regSDB_SUVD_CGC_GATE 0x00c4
+#define regSDB_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_GATE 0x00c4
+#define regSIT0_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_GATE 0x00c4
+#define regSIT1_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_GATE 0x00c4
+#define regSIT2_NXT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSIT_SUVD_CGC_GATE 0x00c4
+#define regSIT_SUVD_CGC_GATE_BASE_IDX 1
+#define regSMPA_SUVD_CGC_GATE 0x00c4
+#define regSMPA_SUVD_CGC_GATE_BASE_IDX 1
+#define regSMP_SUVD_CGC_GATE 0x00c4
+#define regSMP_SUVD_CGC_GATE_BASE_IDX 1
+#define regSRE_SUVD_CGC_GATE 0x00c4
+#define regSRE_SUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_MPBE0_SUVD_CGC_GATE 0x00c4
+#define regUVD_MPBE0_SUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_MPBE1_SUVD_CGC_GATE 0x00c4
+#define regUVD_MPBE1_SUVD_CGC_GATE_BASE_IDX 1
+#define regUVD_SUVD_CGC_GATE 0x00c4
+#define regUVD_SUVD_CGC_GATE_BASE_IDX 1
+#define regAVM_SUVD_CGC_GATE2 0x00c5
+#define regAVM_SUVD_CGC_GATE2_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_GATE2 0x00c5
+#define regCDEFE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regDBR_SUVD_CGC_GATE2 0x00c5
+#define regDBR_SUVD_CGC_GATE2_BASE_IDX 1
+#define regENT_SUVD_CGC_GATE2 0x00c5
+#define regENT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regIME_SUVD_CGC_GATE2 0x00c5
+#define regIME_SUVD_CGC_GATE2_BASE_IDX 1
+#define regMPC1_SUVD_CGC_GATE2 0x00c5
+#define regMPC1_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSAOE_SUVD_CGC_GATE2 0x00c5
+#define regSAOE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSDB_SUVD_CGC_GATE2 0x00c5
+#define regSDB_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_GATE2 0x00c5
+#define regSIT0_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_GATE2 0x00c5
+#define regSIT1_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_GATE2 0x00c5
+#define regSIT2_NXT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSIT_SUVD_CGC_GATE2 0x00c5
+#define regSIT_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSMPA_SUVD_CGC_GATE2 0x00c5
+#define regSMPA_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSMP_SUVD_CGC_GATE2 0x00c5
+#define regSMP_SUVD_CGC_GATE2_BASE_IDX 1
+#define regSRE_SUVD_CGC_GATE2 0x00c5
+#define regSRE_SUVD_CGC_GATE2_BASE_IDX 1
+#define regUVD_MPBE0_SUVD_CGC_GATE2 0x00c5
+#define regUVD_MPBE0_SUVD_CGC_GATE2_BASE_IDX 1
+#define regUVD_MPBE1_SUVD_CGC_GATE2 0x00c5
+#define regUVD_MPBE1_SUVD_CGC_GATE2_BASE_IDX 1
+#define regUVD_SUVD_CGC_GATE2 0x00c5
+#define regUVD_SUVD_CGC_GATE2_BASE_IDX 1
+#define regAVM_SUVD_CGC_CTRL 0x00c6
+#define regAVM_SUVD_CGC_CTRL_BASE_IDX 1
+#define regCDEFE_SUVD_CGC_CTRL 0x00c6
+#define regCDEFE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regDBR_SUVD_CGC_CTRL 0x00c6
+#define regDBR_SUVD_CGC_CTRL_BASE_IDX 1
+#define regEFC_SUVD_CGC_CTRL 0x00c6
+#define regEFC_SUVD_CGC_CTRL_BASE_IDX 1
+#define regENT_SUVD_CGC_CTRL 0x00c6
+#define regENT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regIME_SUVD_CGC_CTRL 0x00c6
+#define regIME_SUVD_CGC_CTRL_BASE_IDX 1
+#define regMPC1_SUVD_CGC_CTRL 0x00c6
+#define regMPC1_SUVD_CGC_CTRL_BASE_IDX 1
+#define regPPU_SUVD_CGC_CTRL 0x00c6
+#define regPPU_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSAOE_SUVD_CGC_CTRL 0x00c6
+#define regSAOE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSCM_SUVD_CGC_CTRL 0x00c6
+#define regSCM_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSDB_SUVD_CGC_CTRL 0x00c6
+#define regSDB_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT0_NXT_SUVD_CGC_CTRL 0x00c6
+#define regSIT0_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT1_NXT_SUVD_CGC_CTRL 0x00c6
+#define regSIT1_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT2_NXT_SUVD_CGC_CTRL 0x00c6
+#define regSIT2_NXT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSIT_SUVD_CGC_CTRL 0x00c6
+#define regSIT_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSMPA_SUVD_CGC_CTRL 0x00c6
+#define regSMPA_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSMP_SUVD_CGC_CTRL 0x00c6
+#define regSMP_SUVD_CGC_CTRL_BASE_IDX 1
+#define regSRE_SUVD_CGC_CTRL 0x00c6
+#define regSRE_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_MPBE0_SUVD_CGC_CTRL 0x00c6
+#define regUVD_MPBE0_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_MPBE1_SUVD_CGC_CTRL 0x00c6
+#define regUVD_MPBE1_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_SUVD_CGC_CTRL 0x00c6
+#define regUVD_SUVD_CGC_CTRL_BASE_IDX 1
+#define regUVD_CGC_CTRL3 0x00ca
+#define regUVD_CGC_CTRL3_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_DATA0 0x00d0
+#define regUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_DATA1 0x00d1
+#define regUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define regUVD_GPCOM_SYS_CMD 0x00d2
+#define regUVD_GPCOM_SYS_CMD_BASE_IDX 1
+#define regUVD_GPCOM_SYS_DATA0 0x00d3
+#define regUVD_GPCOM_SYS_DATA0_BASE_IDX 1
+#define regUVD_GPCOM_SYS_DATA1 0x00d4
+#define regUVD_GPCOM_SYS_DATA1_BASE_IDX 1
+#define regUVD_VCPU_INT_EN 0x00d5
+#define regUVD_VCPU_INT_EN_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS 0x00d6
+#define regUVD_VCPU_INT_STATUS_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK 0x00d7
+#define regUVD_VCPU_INT_ACK_BASE_IDX 1
+#define regUVD_VCPU_INT_ROUTE 0x00d8
+#define regUVD_VCPU_INT_ROUTE_BASE_IDX 1
+#define regUVD_DRV_FW_MSG 0x00d9
+#define regUVD_DRV_FW_MSG_BASE_IDX 1
+#define regUVD_FW_DRV_MSG_ACK 0x00da
+#define regUVD_FW_DRV_MSG_ACK_BASE_IDX 1
+#define regUVD_SUVD_INT_EN 0x00db
+#define regUVD_SUVD_INT_EN_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS 0x00dc
+#define regUVD_SUVD_INT_STATUS_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK 0x00dd
+#define regUVD_SUVD_INT_ACK_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_EN 0x00de
+#define regUVD_ENC_VCPU_INT_EN_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_STATUS 0x00df
+#define regUVD_ENC_VCPU_INT_STATUS_BASE_IDX 1
+#define regUVD_ENC_VCPU_INT_ACK 0x00e0
+#define regUVD_ENC_VCPU_INT_ACK_BASE_IDX 1
+#define regUVD_MASTINT_EN 0x00e1
+#define regUVD_MASTINT_EN_BASE_IDX 1
+#define regUVD_SYS_INT_EN 0x00e2
+#define regUVD_SYS_INT_EN_BASE_IDX 1
+#define regUVD_SYS_INT_STATUS 0x00e3
+#define regUVD_SYS_INT_STATUS_BASE_IDX 1
+#define regUVD_SYS_INT_ACK 0x00e4
+#define regUVD_SYS_INT_ACK_BASE_IDX 1
+#define regUVD_JOB_DONE 0x00e5
+#define regUVD_JOB_DONE_BASE_IDX 1
+#define regUVD_CBUF_ID 0x00e6
+#define regUVD_CBUF_ID_BASE_IDX 1
+#define regUVD_CONTEXT_ID 0x00e7
+#define regUVD_CONTEXT_ID_BASE_IDX 1
+#define regUVD_CONTEXT_ID2 0x00e8
+#define regUVD_CONTEXT_ID2_BASE_IDX 1
+#define regUVD_NO_OP 0x00e9
+#define regUVD_NO_OP_BASE_IDX 1
+#define regUVD_RB_BASE_LO 0x00ea
+#define regUVD_RB_BASE_LO_BASE_IDX 1
+#define regUVD_RB_BASE_HI 0x00eb
+#define regUVD_RB_BASE_HI_BASE_IDX 1
+#define regUVD_RB_SIZE 0x00ec
+#define regUVD_RB_SIZE_BASE_IDX 1
+#define regUVD_RB_BASE_LO2 0x00ef
+#define regUVD_RB_BASE_LO2_BASE_IDX 1
+#define regUVD_RB_BASE_HI2 0x00f0
+#define regUVD_RB_BASE_HI2_BASE_IDX 1
+#define regUVD_RB_SIZE2 0x00f1
+#define regUVD_RB_SIZE2_BASE_IDX 1
+#define regUVD_RB_BASE_LO3 0x00f4
+#define regUVD_RB_BASE_LO3_BASE_IDX 1
+#define regUVD_RB_BASE_HI3 0x00f5
+#define regUVD_RB_BASE_HI3_BASE_IDX 1
+#define regUVD_RB_SIZE3 0x00f6
+#define regUVD_RB_SIZE3_BASE_IDX 1
+#define regUVD_RB_BASE_LO4 0x00f9
+#define regUVD_RB_BASE_LO4_BASE_IDX 1
+#define regUVD_RB_BASE_HI4 0x00fa
+#define regUVD_RB_BASE_HI4_BASE_IDX 1
+#define regUVD_RB_SIZE4 0x00fb
+#define regUVD_RB_SIZE4_BASE_IDX 1
+#define regUVD_OUT_RB_BASE_LO 0x00fe
+#define regUVD_OUT_RB_BASE_LO_BASE_IDX 1
+#define regUVD_OUT_RB_BASE_HI 0x00ff
+#define regUVD_OUT_RB_BASE_HI_BASE_IDX 1
+#define regUVD_OUT_RB_SIZE 0x0100
+#define regUVD_OUT_RB_SIZE_BASE_IDX 1
+#define regUVD_IOV_ACTIVE_FCN_ID 0x0103
+#define regUVD_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regUVD_IOV_MAILBOX 0x0104
+#define regUVD_IOV_MAILBOX_BASE_IDX 1
+#define regUVD_IOV_MAILBOX_RESP 0x0105
+#define regUVD_IOV_MAILBOX_RESP_BASE_IDX 1
+#define regUVD_RB_ARB_CTRL 0x0106
+#define regUVD_RB_ARB_CTRL_BASE_IDX 1
+#define regUVD_CTX_INDEX 0x0107
+#define regUVD_CTX_INDEX_BASE_IDX 1
+#define regUVD_CTX_DATA 0x0108
+#define regUVD_CTX_DATA_BASE_IDX 1
+#define regUVD_CXW_WR 0x0109
+#define regUVD_CXW_WR_BASE_IDX 1
+#define regUVD_CXW_WR_INT_ID 0x010a
+#define regUVD_CXW_WR_INT_ID_BASE_IDX 1
+#define regUVD_CXW_WR_INT_CTX_ID 0x010b
+#define regUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1
+#define regUVD_CXW_INT_ID 0x010c
+#define regUVD_CXW_INT_ID_BASE_IDX 1
+#define regUVD_MPEG2_ERROR 0x010d
+#define regUVD_MPEG2_ERROR_BASE_IDX 1
+#define regUVD_YBASE 0x0110
+#define regUVD_YBASE_BASE_IDX 1
+#define regUVD_UVBASE 0x0111
+#define regUVD_UVBASE_BASE_IDX 1
+#define regUVD_PITCH 0x0112
+#define regUVD_PITCH_BASE_IDX 1
+#define regUVD_WIDTH 0x0113
+#define regUVD_WIDTH_BASE_IDX 1
+#define regUVD_HEIGHT 0x0114
+#define regUVD_HEIGHT_BASE_IDX 1
+#define regUVD_PICCOUNT 0x0115
+#define regUVD_PICCOUNT_BASE_IDX 1
+#define regUVD_MPRD_INITIAL_XY 0x0116
+#define regUVD_MPRD_INITIAL_XY_BASE_IDX 1
+#define regUVD_MPEG2_CTRL 0x0117
+#define regUVD_MPEG2_CTRL_BASE_IDX 1
+#define regUVD_MB_CTL_BUF_BASE 0x0118
+#define regUVD_MB_CTL_BUF_BASE_BASE_IDX 1
+#define regUVD_PIC_CTL_BUF_BASE 0x0119
+#define regUVD_PIC_CTL_BUF_BASE_BASE_IDX 1
+#define regUVD_DXVA_BUF_SIZE 0x011a
+#define regUVD_DXVA_BUF_SIZE_BASE_IDX 1
+#define regUVD_SCRATCH_NP 0x011b
+#define regUVD_SCRATCH_NP_BASE_IDX 1
+#define regUVD_CLK_SWT_HANDSHAKE 0x011c
+#define regUVD_CLK_SWT_HANDSHAKE_BASE_IDX 1
+#define regUVD_GP_SCRATCH0 0x011e
+#define regUVD_GP_SCRATCH0_BASE_IDX 1
+#define regUVD_GP_SCRATCH1 0x011f
+#define regUVD_GP_SCRATCH1_BASE_IDX 1
+#define regUVD_GP_SCRATCH2 0x0120
+#define regUVD_GP_SCRATCH2_BASE_IDX 1
+#define regUVD_GP_SCRATCH3 0x0121
+#define regUVD_GP_SCRATCH3_BASE_IDX 1
+#define regUVD_GP_SCRATCH4 0x0122
+#define regUVD_GP_SCRATCH4_BASE_IDX 1
+#define regUVD_GP_SCRATCH5 0x0123
+#define regUVD_GP_SCRATCH5_BASE_IDX 1
+#define regUVD_GP_SCRATCH6 0x0124
+#define regUVD_GP_SCRATCH6_BASE_IDX 1
+#define regUVD_GP_SCRATCH7 0x0125
+#define regUVD_GP_SCRATCH7_BASE_IDX 1
+#define regUVD_GP_SCRATCH8 0x0126
+#define regUVD_GP_SCRATCH8_BASE_IDX 1
+#define regUVD_GP_SCRATCH9 0x0127
+#define regUVD_GP_SCRATCH9_BASE_IDX 1
+#define regUVD_GP_SCRATCH10 0x0128
+#define regUVD_GP_SCRATCH10_BASE_IDX 1
+#define regUVD_GP_SCRATCH11 0x0129
+#define regUVD_GP_SCRATCH11_BASE_IDX 1
+#define regUVD_GP_SCRATCH12 0x012a
+#define regUVD_GP_SCRATCH12_BASE_IDX 1
+#define regUVD_GP_SCRATCH13 0x012b
+#define regUVD_GP_SCRATCH13_BASE_IDX 1
+#define regUVD_GP_SCRATCH14 0x012c
+#define regUVD_GP_SCRATCH14_BASE_IDX 1
+#define regUVD_GP_SCRATCH15 0x012d
+#define regUVD_GP_SCRATCH15_BASE_IDX 1
+#define regUVD_GP_SCRATCH16 0x012e
+#define regUVD_GP_SCRATCH16_BASE_IDX 1
+#define regUVD_GP_SCRATCH17 0x012f
+#define regUVD_GP_SCRATCH17_BASE_IDX 1
+#define regUVD_GP_SCRATCH18 0x0130
+#define regUVD_GP_SCRATCH18_BASE_IDX 1
+#define regUVD_GP_SCRATCH19 0x0131
+#define regUVD_GP_SCRATCH19_BASE_IDX 1
+#define regUVD_GP_SCRATCH20 0x0132
+#define regUVD_GP_SCRATCH20_BASE_IDX 1
+#define regUVD_GP_SCRATCH21 0x0133
+#define regUVD_GP_SCRATCH21_BASE_IDX 1
+#define regUVD_GP_SCRATCH22 0x0134
+#define regUVD_GP_SCRATCH22_BASE_IDX 1
+#define regUVD_GP_SCRATCH23 0x0135
+#define regUVD_GP_SCRATCH23_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_LO 0x0136
+#define regUVD_AUDIO_RB_BASE_LO_BASE_IDX 1
+#define regUVD_AUDIO_RB_BASE_HI 0x0137
+#define regUVD_AUDIO_RB_BASE_HI_BASE_IDX 1
+#define regUVD_AUDIO_RB_SIZE 0x0138
+#define regUVD_AUDIO_RB_SIZE_BASE_IDX 1
+#define regUVD_VCPU_INT_STATUS2 0x013b
+#define regUVD_VCPU_INT_STATUS2_BASE_IDX 1
+#define regUVD_VCPU_INT_ACK2 0x013c
+#define regUVD_VCPU_INT_ACK2_BASE_IDX 1
+#define regUVD_VCPU_INT_EN2 0x013d
+#define regUVD_VCPU_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS2 0x013e
+#define regUVD_SUVD_CGC_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_STATUS2 0x0140
+#define regUVD_SUVD_INT_STATUS2_BASE_IDX 1
+#define regUVD_SUVD_INT_EN2 0x0141
+#define regUVD_SUVD_INT_EN2_BASE_IDX 1
+#define regUVD_SUVD_INT_ACK2 0x0142
+#define regUVD_SUVD_INT_ACK2_BASE_IDX 1
+#define regUVD_STATUS 0x0143
+#define regUVD_STATUS_BASE_IDX 1
+#define regUVD_ENC_PIPE_BUSY 0x0144
+#define regUVD_ENC_PIPE_BUSY_BASE_IDX 1
+#define regUVD_FW_POWER_STATUS 0x0145
+#define regUVD_FW_POWER_STATUS_BASE_IDX 1
+#define regUVD_CNTL 0x0146
+#define regUVD_CNTL_BASE_IDX 1
+#define regUVD_SOFT_RESET 0x0147
+#define regUVD_SOFT_RESET_BASE_IDX 1
+#define regUVD_SOFT_RESET2 0x0148
+#define regUVD_SOFT_RESET2_BASE_IDX 1
+#define regUVD_MMSCH_SOFT_RESET 0x0149
+#define regUVD_MMSCH_SOFT_RESET_BASE_IDX 1
+#define regUVD_WIG_CTRL 0x014a
+#define regUVD_WIG_CTRL_BASE_IDX 1
+#define regUVD_CGC_STATUS 0x014c
+#define regUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_CGC_UDEC_STATUS 0x014e
+#define regUVD_CGC_UDEC_STATUS_BASE_IDX 1
+#define regUVD_SUVD_CGC_STATUS 0x0150
+#define regUVD_SUVD_CGC_STATUS_BASE_IDX 1
+#define regUVD_GPCOM_VCPU_CMD 0x0152
+#define regUVD_GPCOM_VCPU_CMD_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_ecpudec
+// base address: 0x1fe00
+#define regUVD_VCPU_CACHE_OFFSET0 0x0180
+#define regUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE0 0x0181
+#define regUVD_VCPU_CACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET1 0x0182
+#define regUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE1 0x0183
+#define regUVD_VCPU_CACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET2 0x0184
+#define regUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE2 0x0185
+#define regUVD_VCPU_CACHE_SIZE2_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET3 0x0186
+#define regUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE3 0x0187
+#define regUVD_VCPU_CACHE_SIZE3_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET4 0x0188
+#define regUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE4 0x0189
+#define regUVD_VCPU_CACHE_SIZE4_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET5 0x018a
+#define regUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE5 0x018b
+#define regUVD_VCPU_CACHE_SIZE5_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET6 0x018c
+#define regUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE6 0x018d
+#define regUVD_VCPU_CACHE_SIZE6_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET7 0x018e
+#define regUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE7 0x018f
+#define regUVD_VCPU_CACHE_SIZE7_BASE_IDX 1
+#define regUVD_VCPU_CACHE_OFFSET8 0x0190
+#define regUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1
+#define regUVD_VCPU_CACHE_SIZE8 0x0191
+#define regUVD_VCPU_CACHE_SIZE8_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET0 0x0192
+#define regUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE0 0x0193
+#define regUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_OFFSET1 0x0194
+#define regUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1
+#define regUVD_VCPU_NONCACHE_SIZE1 0x0195
+#define regUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1
+#define regUVD_VCPU_CNTL 0x0196
+#define regUVD_VCPU_CNTL_BASE_IDX 1
+#define regUVD_VCPU_PRID 0x0197
+#define regUVD_VCPU_PRID_BASE_IDX 1
+#define regUVD_VCPU_TRCE 0x0198
+#define regUVD_VCPU_TRCE_BASE_IDX 1
+#define regUVD_VCPU_TRCE_RD 0x0199
+#define regUVD_VCPU_TRCE_RD_BASE_IDX 1
+#define regUVD_VCPU_IND_INDEX 0x019b
+#define regUVD_VCPU_IND_INDEX_BASE_IDX 1
+#define regUVD_VCPU_IND_DATA 0x019c
+#define regUVD_VCPU_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_mpcdec
+// base address: 0x1ff30
+#define regUVD_MP_SWAP_CNTL 0x01cc
+#define regUVD_MP_SWAP_CNTL_BASE_IDX 1
+#define regUVD_MP_SWAP_CNTL2 0x01cd
+#define regUVD_MP_SWAP_CNTL2_BASE_IDX 1
+#define regUVD_MPC_LUMA_SRCH 0x01ce
+#define regUVD_MPC_LUMA_SRCH_BASE_IDX 1
+#define regUVD_MPC_LUMA_HIT 0x01cf
+#define regUVD_MPC_LUMA_HIT_BASE_IDX 1
+#define regUVD_MPC_LUMA_HITPEND 0x01d0
+#define regUVD_MPC_LUMA_HITPEND_BASE_IDX 1
+#define regUVD_MPC_CHROMA_SRCH 0x01d1
+#define regUVD_MPC_CHROMA_SRCH_BASE_IDX 1
+#define regUVD_MPC_CHROMA_HIT 0x01d2
+#define regUVD_MPC_CHROMA_HIT_BASE_IDX 1
+#define regUVD_MPC_CHROMA_HITPEND 0x01d3
+#define regUVD_MPC_CHROMA_HITPEND_BASE_IDX 1
+#define regUVD_MPC_CNTL 0x01d4
+#define regUVD_MPC_CNTL_BASE_IDX 1
+#define regUVD_MPC_PITCH 0x01d5
+#define regUVD_MPC_PITCH_BASE_IDX 1
+#define regUVD_MPC_SET_MUXA0 0x01d6
+#define regUVD_MPC_SET_MUXA0_BASE_IDX 1
+#define regUVD_MPC_SET_MUXA1 0x01d7
+#define regUVD_MPC_SET_MUXA1_BASE_IDX 1
+#define regUVD_MPC_SET_MUXB0 0x01d8
+#define regUVD_MPC_SET_MUXB0_BASE_IDX 1
+#define regUVD_MPC_SET_MUXB1 0x01d9
+#define regUVD_MPC_SET_MUXB1_BASE_IDX 1
+#define regUVD_MPC_SET_MUX 0x01da
+#define regUVD_MPC_SET_MUX_BASE_IDX 1
+#define regUVD_MPC_SET_ALU 0x01db
+#define regUVD_MPC_SET_ALU_BASE_IDX 1
+#define regUVD_MPC_PERF0 0x01dc
+#define regUVD_MPC_PERF0_BASE_IDX 1
+#define regUVD_MPC_PERF1 0x01dd
+#define regUVD_MPC_PERF1_BASE_IDX 1
+#define regUVD_MPC_IND_INDEX 0x01de
+#define regUVD_MPC_IND_INDEX_BASE_IDX 1
+#define regUVD_MPC_IND_DATA 0x01df
+#define regUVD_MPC_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_rbcdec
+// base address: 0x1ff90
+#define regUVD_RBC_IB_SIZE 0x01e4
+#define regUVD_RBC_IB_SIZE_BASE_IDX 1
+#define regUVD_RBC_IB_SIZE_UPDATE 0x01e5
+#define regUVD_RBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define regUVD_RBC_RB_CNTL 0x01e6
+#define regUVD_RBC_RB_CNTL_BASE_IDX 1
+#define regUVD_RBC_RB_RPTR_ADDR 0x01e7
+#define regUVD_RBC_RB_RPTR_ADDR_BASE_IDX 1
+#define regUVD_RBC_VCPU_ACCESS 0x01ea
+#define regUVD_RBC_VCPU_ACCESS_BASE_IDX 1
+#define regUVD_FW_SEMAPHORE_CNTL 0x01eb
+#define regUVD_FW_SEMAPHORE_CNTL_BASE_IDX 1
+#define regUVD_RBC_READ_REQ_URGENT_CNTL 0x01ed
+#define regUVD_RBC_READ_REQ_URGENT_CNTL_BASE_IDX 1
+#define regUVD_RBC_RB_WPTR_CNTL 0x01ee
+#define regUVD_RBC_RB_WPTR_CNTL_BASE_IDX 1
+#define regUVD_RBC_WPTR_STATUS 0x01ef
+#define regUVD_RBC_WPTR_STATUS_BASE_IDX 1
+#define regUVD_RBC_WPTR_POLL_CNTL 0x01f0
+#define regUVD_RBC_WPTR_POLL_CNTL_BASE_IDX 1
+#define regUVD_RBC_WPTR_POLL_ADDR 0x01f1
+#define regUVD_RBC_WPTR_POLL_ADDR_BASE_IDX 1
+#define regUVD_SEMA_CMD 0x01f2
+#define regUVD_SEMA_CMD_BASE_IDX 1
+#define regUVD_SEMA_ADDR_LOW 0x01f3
+#define regUVD_SEMA_ADDR_LOW_BASE_IDX 1
+#define regUVD_SEMA_ADDR_HIGH 0x01f4
+#define regUVD_SEMA_ADDR_HIGH_BASE_IDX 1
+#define regUVD_ENGINE_CNTL 0x01f5
+#define regUVD_ENGINE_CNTL_BASE_IDX 1
+#define regUVD_SEMA_TIMEOUT_STATUS 0x01f6
+#define regUVD_SEMA_TIMEOUT_STATUS_BASE_IDX 1
+#define regUVD_SEMA_CNTL 0x01f7
+#define regUVD_SEMA_CNTL_BASE_IDX 1
+#define regUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x01f8
+#define regUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
+#define regUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x01f9
+#define regUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_BASE_IDX 1
+#define regUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x01fa
+#define regUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1
+#define regUVD_JOB_START 0x01fb
+#define regUVD_JOB_START_BASE_IDX 1
+#define regUVD_RBC_BUF_STATUS 0x01fc
+#define regUVD_RBC_BUF_STATUS_BASE_IDX 1
+#define regUVD_RBC_SWAP_CNTL 0x01fd
+#define regUVD_RBC_SWAP_CNTL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_lmi_adpdec
+// base address: 0x20090
+#define regUVD_LMI_RE_64BIT_BAR_LOW 0x0224
+#define regUVD_LMI_RE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RE_64BIT_BAR_HIGH 0x0225
+#define regUVD_LMI_RE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_LOW 0x0226
+#define regUVD_LMI_IT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IT_64BIT_BAR_HIGH 0x0227
+#define regUVD_LMI_IT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_LOW 0x0228
+#define regUVD_LMI_MP_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MP_64BIT_BAR_HIGH 0x0229
+#define regUVD_LMI_MP_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_LOW 0x022a
+#define regUVD_LMI_CM_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CM_64BIT_BAR_HIGH 0x022b
+#define regUVD_LMI_CM_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_LOW 0x022c
+#define regUVD_LMI_DB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DB_64BIT_BAR_HIGH 0x022d
+#define regUVD_LMI_DB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_LOW 0x022e
+#define regUVD_LMI_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH 0x022f
+#define regUVD_LMI_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW 0x0230
+#define regUVD_LMI_IDCT_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH 0x0231
+#define regUVD_LMI_IDCT_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW 0x0232
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH 0x0233
+#define regUVD_LMI_MPRD_S0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW 0x0234
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH 0x0235
+#define regUVD_LMI_MPRD_S1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW 0x0236
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH 0x0237
+#define regUVD_LMI_MPRD_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MPC_64BIT_BAR_LOW 0x0238
+#define regUVD_LMI_MPC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MPC_64BIT_BAR_HIGH 0x0239
+#define regUVD_LMI_MPC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x023a
+#define regUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x023b
+#define regUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x023c
+#define regUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x023d
+#define regUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW 0x023e
+#define regUVD_LMI_LBSI_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH 0x023f
+#define regUVD_LMI_LBSI_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x0240
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x0241
+#define regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x0242
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x0243
+#define regUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0244
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0245
+#define regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_LOW 0x0246
+#define regUVD_LMI_CENC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH 0x0247
+#define regUVD_LMI_CENC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_LOW 0x0248
+#define regUVD_LMI_SRE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH 0x0249
+#define regUVD_LMI_SRE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW 0x024a
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH 0x024b
+#define regUVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW 0x024c
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH 0x024d
+#define regUVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW 0x024e
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH 0x024f
+#define regUVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_64BIT_BAR_LOW 0x0250
+#define regUVD_LMI_MIF_REF_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_64BIT_BAR_HIGH 0x0251
+#define regUVD_LMI_MIF_REF_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW 0x0252
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH 0x0253
+#define regUVD_LMI_MIF_DBW_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW 0x0254
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH 0x0255
+#define regUVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW 0x0256
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH 0x0257
+#define regUVD_LMI_MIF_BSP0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW 0x0258
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH 0x0259
+#define regUVD_LMI_MIF_BSP1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW 0x025a
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH 0x025b
+#define regUVD_LMI_MIF_BSP2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW 0x025c
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH 0x025d
+#define regUVD_LMI_MIF_BSP3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW 0x025e
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH 0x025f
+#define regUVD_LMI_MIF_BSD0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW 0x0260
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH 0x0261
+#define regUVD_LMI_MIF_BSD1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW 0x0262
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH 0x0263
+#define regUVD_LMI_MIF_BSD2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW 0x0264
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH 0x0265
+#define regUVD_LMI_MIF_BSD3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW 0x0266
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH 0x0267
+#define regUVD_LMI_MIF_BSD4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x0270
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x0271
+#define regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x0272
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x0273
+#define regUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x0274
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x0275
+#define regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x0276
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x0277
+#define regUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0278
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0279
+#define regUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x027a
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x027b
+#define regUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x027c
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x027d
+#define regUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x027e
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x027f
+#define regUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW 0x0280
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH 0x0281
+#define regUVD_LMI_MIF_SCLR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW 0x0282
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH 0x0283
+#define regUVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH 0x0284
+#define regUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW 0x0298
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH 0x0299
+#define regUVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW 0x029a
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH 0x029b
+#define regUVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW 0x029c
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH 0x029d
+#define regUVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW 0x029e
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH 0x029f
+#define regUVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_ADP_ATOMIC_CONFIG 0x02a1
+#define regUVD_ADP_ATOMIC_CONFIG_BASE_IDX 1
+#define regUVD_LMI_ARB_CTRL2 0x02a2
+#define regUVD_LMI_ARB_CTRL2_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x02a7
+#define regUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI 0x02a8
+#define regUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1
+#define regUVD_LMI_LAT_CTRL 0x02a9
+#define regUVD_LMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_LMI_LAT_CNTR 0x02aa
+#define regUVD_LMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_AVG_LAT_CNTR 0x02ab
+#define regUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_LMI_SPH 0x02ac
+#define regUVD_LMI_SPH_BASE_IDX 1
+#define regUVD_LMI_VCPU_CACHE_VMID 0x02ad
+#define regUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_LMI_CTRL2 0x02ae
+#define regUVD_LMI_CTRL2_BASE_IDX 1
+#define regUVD_LMI_URGENT_CTRL 0x02af
+#define regUVD_LMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_LMI_CTRL 0x02b0
+#define regUVD_LMI_CTRL_BASE_IDX 1
+#define regUVD_LMI_STATUS 0x02b1
+#define regUVD_LMI_STATUS_BASE_IDX 1
+#define regUVD_LMI_PERFMON_CTRL 0x02b4
+#define regUVD_LMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_LO 0x02b5
+#define regUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_LMI_PERFMON_COUNT_HI 0x02b6
+#define regUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_LMI_ADP_SWAP_CNTL 0x02b7
+#define regUVD_LMI_ADP_SWAP_CNTL_BASE_IDX 1
+#define regUVD_LMI_RBC_RB_VMID 0x02b8
+#define regUVD_LMI_RBC_RB_VMID_BASE_IDX 1
+#define regUVD_LMI_RBC_IB_VMID 0x02b9
+#define regUVD_LMI_RBC_IB_VMID_BASE_IDX 1
+#define regUVD_LMI_MC_CREDITS 0x02ba
+#define regUVD_LMI_MC_CREDITS_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_INDEX 0x02be
+#define regUVD_LMI_ADP_IND_INDEX_BASE_IDX 1
+#define regUVD_LMI_ADP_IND_DATA 0x02bf
+#define regUVD_LMI_ADP_IND_DATA_BASE_IDX 1
+#define regUVD_LMI_ADP_PF_EN 0x02c0
+#define regUVD_LMI_ADP_PF_EN_BASE_IDX 1
+#define regUVD_LMI_PREF_CTRL 0x02c2
+#define regUVD_LMI_PREF_CTRL_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW 0x02dd
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH 0x02de
+#define regUVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH_BASE_IDX 1
+#define regVCN_RAS_CNTL 0x02df
+#define regVCN_RAS_CNTL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg0_jpegnpdec
+// base address: 0x20f00
+#define regUVD_JPEG_CNTL 0x05c0
+#define regUVD_JPEG_CNTL_BASE_IDX 1
+#define regUVD_JPEG_RB_BASE 0x05c1
+#define regUVD_JPEG_RB_BASE_BASE_IDX 1
+#define regUVD_JPEG_RB_WPTR 0x05c2
+#define regUVD_JPEG_RB_WPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_RPTR 0x05c3
+#define regUVD_JPEG_RB_RPTR_BASE_IDX 1
+#define regUVD_JPEG_RB_SIZE 0x05c4
+#define regUVD_JPEG_RB_SIZE_BASE_IDX 1
+#define regUVD_JPEG_DEC_CNT 0x05c5
+#define regUVD_JPEG_DEC_CNT_BASE_IDX 1
+#define regUVD_JPEG_SPS_INFO 0x05c6
+#define regUVD_JPEG_SPS_INFO_BASE_IDX 1
+#define regUVD_JPEG_SPS1_INFO 0x05c7
+#define regUVD_JPEG_SPS1_INFO_BASE_IDX 1
+#define regUVD_JPEG_RE_TIMER 0x05c8
+#define regUVD_JPEG_RE_TIMER_BASE_IDX 1
+#define regUVD_JPEG_DEC_SCRATCH0 0x05c9
+#define regUVD_JPEG_DEC_SCRATCH0_BASE_IDX 1
+#define regUVD_JPEG_INT_EN 0x05ca
+#define regUVD_JPEG_INT_EN_BASE_IDX 1
+#define regUVD_JPEG_INT_STAT 0x05cb
+#define regUVD_JPEG_INT_STAT_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL0 0x05cc
+#define regUVD_JPEG_TIER_CNTL0_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL1 0x05cd
+#define regUVD_JPEG_TIER_CNTL1_BASE_IDX 1
+#define regUVD_JPEG_TIER_CNTL2 0x05ce
+#define regUVD_JPEG_TIER_CNTL2_BASE_IDX 1
+#define regUVD_JPEG_TIER_STATUS 0x05cf
+#define regUVD_JPEG_TIER_STATUS_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_sclk0_jpegnpsclkdec
+// base address: 0x21000
+#define regUVD_JPEG_OUTBUF_CNTL 0x0600
+#define regUVD_JPEG_OUTBUF_CNTL_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_WPTR 0x0601
+#define regUVD_JPEG_OUTBUF_WPTR_BASE_IDX 1
+#define regUVD_JPEG_OUTBUF_RPTR 0x0602
+#define regUVD_JPEG_OUTBUF_RPTR_BASE_IDX 1
+#define regUVD_JPEG_PITCH 0x0603
+#define regUVD_JPEG_PITCH_BASE_IDX 1
+#define regUVD_JPEG_UV_PITCH 0x0604
+#define regUVD_JPEG_UV_PITCH_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE 0x0605
+#define regJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE 0x0606
+#define regJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX8_ADDR_CONFIG 0x0607
+#define regJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE 0x0608
+#define regJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE 0x0609
+#define regJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 1
+#define regJPEG_DEC_GFX10_ADDR_CONFIG 0x060a
+#define regJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regJPEG_DEC_ADDR_MODE 0x060b
+#define regJPEG_DEC_ADDR_MODE_BASE_IDX 1
+#define regUVD_JPEG_OUTPUT_XY 0x060c
+#define regUVD_JPEG_OUTPUT_XY_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_CMD 0x060d
+#define regUVD_JPEG_GPCOM_CMD_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA0 0x060e
+#define regUVD_JPEG_GPCOM_DATA0_BASE_IDX 1
+#define regUVD_JPEG_GPCOM_DATA1 0x060f
+#define regUVD_JPEG_GPCOM_DATA1_BASE_IDX 1
+#define regUVD_JPEG_SCRATCH1 0x0610
+#define regUVD_JPEG_SCRATCH1_BASE_IDX 1
+#define regUVD_JPEG_DEC_SOFT_RST 0x0611
+#define regUVD_JPEG_DEC_SOFT_RST_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jrbc0_uvd_jrbc_dec
+// base address: 0x21100
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
+#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_CNTL 0x0641
+#define regUVD_JRBC0_UVD_JRBC_RB_CNTL_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE 0x0642
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_URGENT_CNTL 0x0643
+#define regUVD_JRBC0_UVD_JRBC_URGENT_CNTL_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_REF_DATA 0x0644
+#define regUVD_JRBC0_UVD_JRBC_RB_REF_DATA_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER 0x0645
+#define regUVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_SOFT_RESET 0x0648
+#define regUVD_JRBC0_UVD_JRBC_SOFT_RESET_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
+#define regUVD_JRBC0_UVD_JRBC_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR 0x064a
+#define regUVD_JRBC0_UVD_JRBC_RB_RPTR_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_BUF_STATUS 0x064b
+#define regUVD_JRBC0_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_BUF_STATUS 0x064c
+#define regUVD_JRBC0_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE 0x064d
+#define regUVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER 0x064e
+#define regUVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_IB_REF_DATA 0x064f
+#define regUVD_JRBC0_UVD_JRBC_IB_REF_DATA_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_CMD 0x0650
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_CMD_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0651
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0652
+#define regUVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_RB_SIZE 0x0653
+#define regUVD_JRBC0_UVD_JRBC_RB_SIZE_BASE_IDX 1
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0 0x0654
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jmi0_uvd_jmi_dec
+// base address: 0x21180
+#define regUVD_JMI0_UVD_JPEG_DEC_PF_CTRL 0x0660
+#define regUVD_JMI0_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_CTRL 0x0661
+#define regUVD_JMI0_UVD_LMI_JRBC_CTRL_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_CTRL 0x0662
+#define regUVD_JMI0_UVD_LMI_JPEG_CTRL_BASE_IDX 1
+#define regUVD_JMI0_JPEG_LMI_DROP 0x0663
+#define regUVD_JMI0_JPEG_LMI_DROP_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_VMID 0x0664
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_VMID 0x0665
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_VMID 0x0666
+#define regUVD_JMI0_UVD_LMI_JPEG_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0667
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0668
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0669
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x066a
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x066b
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x066c
+#define regUVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID 0x066d
+#define regUVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_DEC_SWAP_CNTL 0x066e
+#define regUVD_JMI0_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL 0x066f
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0670
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0671
+#define regUVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0672
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0673
+#define regUVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0674
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0675
+#define regUVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0676
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0677
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0678
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0679
+#define regUVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2 0x067d
+#define regUVD_JMI0_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jmi_common_dec
+// base address: 0x21300
+#define regUVD_JADP_MCIF_URGENT_CTRL 0x06c1
+#define regUVD_JADP_MCIF_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_URGENT_CTRL 0x06c2
+#define regUVD_JMI_URGENT_CTRL_BASE_IDX 1
+#define regUVD_JMI_CTRL 0x06c3
+#define regUVD_JMI_CTRL_BASE_IDX 1
+#define regJPEG_MEMCHECK_CLAMPING_CNTL 0x06c4
+#define regJPEG_MEMCHECK_CLAMPING_CNTL_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR 0x06c5
+#define regJPEG_MEMCHECK_SAFE_ADDR_BASE_IDX 1
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT 0x06c6
+#define regJPEG_MEMCHECK_SAFE_ADDR_64BIT_BASE_IDX 1
+#define regUVD_JMI_LAT_CTRL 0x06c7
+#define regUVD_JMI_LAT_CTRL_BASE_IDX 1
+#define regUVD_JMI_LAT_CNTR 0x06c8
+#define regUVD_JMI_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_AVG_LAT_CNTR 0x06c9
+#define regUVD_JMI_AVG_LAT_CNTR_BASE_IDX 1
+#define regUVD_JMI_PERFMON_CTRL 0x06ca
+#define regUVD_JMI_PERFMON_CTRL_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_LO 0x06cb
+#define regUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 1
+#define regUVD_JMI_PERFMON_COUNT_HI 0x06cc
+#define regUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 1
+#define regUVD_JMI_CLEAN_STATUS 0x06cd
+#define regUVD_JMI_CLEAN_STATUS_BASE_IDX 1
+#define regUVD_JMI_CNTL 0x06ce
+#define regUVD_JMI_CNTL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_common_dec
+// base address: 0x21400
+#define regJPEG_SOFT_RESET_STATUS 0x0700
+#define regJPEG_SOFT_RESET_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_EN 0x0701
+#define regJPEG_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_SYS_INT_EN1 0x0702
+#define regJPEG_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS 0x0703
+#define regJPEG_SYS_INT_STATUS_BASE_IDX 1
+#define regJPEG_SYS_INT_STATUS1 0x0704
+#define regJPEG_SYS_INT_STATUS1_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK 0x0705
+#define regJPEG_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_SYS_INT_ACK1 0x0706
+#define regJPEG_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN 0x0707
+#define regJPEG_MEMCHECK_SYS_INT_EN_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_EN1 0x0708
+#define regJPEG_MEMCHECK_SYS_INT_EN1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT 0x0709
+#define regJPEG_MEMCHECK_SYS_INT_STAT_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT1 0x070a
+#define regJPEG_MEMCHECK_SYS_INT_STAT1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_STAT2 0x070b
+#define regJPEG_MEMCHECK_SYS_INT_STAT2_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK 0x070c
+#define regJPEG_MEMCHECK_SYS_INT_ACK_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK1 0x070d
+#define regJPEG_MEMCHECK_SYS_INT_ACK1_BASE_IDX 1
+#define regJPEG_MEMCHECK_SYS_INT_ACK2 0x070e
+#define regJPEG_MEMCHECK_SYS_INT_ACK2_BASE_IDX 1
+#define regJPEG_MASTINT_EN 0x0710
+#define regJPEG_MASTINT_EN_BASE_IDX 1
+#define regJPEG_IH_CTRL 0x0711
+#define regJPEG_IH_CTRL_BASE_IDX 1
+#define regJRBBM_ARB_CTRL 0x0713
+#define regJRBBM_ARB_CTRL_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_common_sclk_dec
+// base address: 0x21480
+#define regJPEG_CGC_GATE 0x0720
+#define regJPEG_CGC_GATE_BASE_IDX 1
+#define regJPEG_CGC_CTRL 0x0721
+#define regJPEG_CGC_CTRL_BASE_IDX 1
+#define regJPEG_CGC_STATUS 0x0722
+#define regJPEG_CGC_STATUS_BASE_IDX 1
+#define regJPEG_COMN_CGC_MEM_CTRL 0x0723
+#define regJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_DEC_CGC_MEM_CTRL 0x0724
+#define regJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_ENC_CGC_MEM_CTRL 0x0726
+#define regJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 1
+#define regJPEG_PERF_BANK_CONF 0x0727
+#define regJPEG_PERF_BANK_CONF_BASE_IDX 1
+#define regJPEG_PERF_BANK_EVENT_SEL 0x0728
+#define regJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT0 0x0729
+#define regJPEG_PERF_BANK_COUNT0_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT1 0x072a
+#define regJPEG_PERF_BANK_COUNT1_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT2 0x072b
+#define regJPEG_PERF_BANK_COUNT2_BASE_IDX 1
+#define regJPEG_PERF_BANK_COUNT3 0x072c
+#define regJPEG_PERF_BANK_COUNT3_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_uvd_pg_dec
+// base address: 0x1f800
+#define regUVD_PGFSM_CONFIG 0x0000
+#define regUVD_PGFSM_CONFIG_BASE_IDX 1
+#define regUVD_PGFSM_STATUS 0x0001
+#define regUVD_PGFSM_STATUS_BASE_IDX 1
+#define regUVD_POWER_STATUS 0x0002
+#define regUVD_POWER_STATUS_BASE_IDX 1
+#define regUVD_JPEG_POWER_STATUS 0x0003
+#define regUVD_JPEG_POWER_STATUS_BASE_IDX 1
+#define regUVD_MC_DJPEG_RD_SPACE 0x0006
+#define regUVD_MC_DJPEG_RD_SPACE_BASE_IDX 1
+#define regUVD_MC_DJPEG_WR_SPACE 0x0007
+#define regUVD_MC_DJPEG_WR_SPACE_BASE_IDX 1
+#define regUVD_MC_EJPEG_RD_SPACE 0x0008
+#define regUVD_MC_EJPEG_RD_SPACE_BASE_IDX 1
+#define regUVD_MC_EJPEG_WR_SPACE 0x0009
+#define regUVD_MC_EJPEG_WR_SPACE_BASE_IDX 1
+#define regUVD_PG_IND_INDEX 0x000c
+#define regUVD_PG_IND_INDEX_BASE_IDX 1
+#define regUVD_PG_IND_DATA 0x000e
+#define regUVD_PG_IND_DATA_BASE_IDX 1
+#define regCC_UVD_HARVESTING 0x000f
+#define regCC_UVD_HARVESTING_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL 0x0011
+#define regUVD_DPG_LMA_CTL_BASE_IDX 1
+#define regUVD_DPG_LMA_DATA 0x0012
+#define regUVD_DPG_LMA_DATA_BASE_IDX 1
+#define regUVD_DPG_LMA_MASK 0x0013
+#define regUVD_DPG_LMA_MASK_BASE_IDX 1
+#define regUVD_DPG_PAUSE 0x0014
+#define regUVD_DPG_PAUSE_BASE_IDX 1
+#define regUVD_SCRATCH1 0x0015
+#define regUVD_SCRATCH1_BASE_IDX 1
+#define regUVD_SCRATCH2 0x0016
+#define regUVD_SCRATCH2_BASE_IDX 1
+#define regUVD_SCRATCH3 0x0017
+#define regUVD_SCRATCH3_BASE_IDX 1
+#define regUVD_SCRATCH4 0x0018
+#define regUVD_SCRATCH4_BASE_IDX 1
+#define regUVD_SCRATCH5 0x0019
+#define regUVD_SCRATCH5_BASE_IDX 1
+#define regUVD_SCRATCH6 0x001a
+#define regUVD_SCRATCH6_BASE_IDX 1
+#define regUVD_SCRATCH7 0x001b
+#define regUVD_SCRATCH7_BASE_IDX 1
+#define regUVD_SCRATCH8 0x001c
+#define regUVD_SCRATCH8_BASE_IDX 1
+#define regUVD_SCRATCH9 0x001d
+#define regUVD_SCRATCH9_BASE_IDX 1
+#define regUVD_SCRATCH10 0x001e
+#define regUVD_SCRATCH10_BASE_IDX 1
+#define regUVD_SCRATCH11 0x001f
+#define regUVD_SCRATCH11_BASE_IDX 1
+#define regUVD_SCRATCH12 0x0020
+#define regUVD_SCRATCH12_BASE_IDX 1
+#define regUVD_SCRATCH13 0x0021
+#define regUVD_SCRATCH13_BASE_IDX 1
+#define regUVD_SCRATCH14 0x0022
+#define regUVD_SCRATCH14_BASE_IDX 1
+#define regUVD_FREE_COUNTER_REG 0x0023
+#define regUVD_FREE_COUNTER_REG_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0024
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0025
+#define regUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_DPG_VCPU_CACHE_OFFSET0 0x0026
+#define regUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID 0x0027
+#define regUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1
+#define regUVD_REG_FILTER_EN 0x0028
+#define regUVD_REG_FILTER_EN_BASE_IDX 1
+#define regUVD_SECURITY_REG_VIO_REPORT 0x0029
+#define regUVD_SECURITY_REG_VIO_REPORT_BASE_IDX 1
+#define regUVD_FW_VERSION 0x002a
+#define regUVD_FW_VERSION_BASE_IDX 1
+#define regUVD_PF_STATUS 0x002c
+#define regUVD_PF_STATUS_BASE_IDX 1
+#define regUVD_DPG_CLK_EN_VCPU_REPORT 0x002e
+#define regUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO 0x002f
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI 0x0030
+#define regCC_UVD_VCPU_ERR_DETECT_BOT_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO 0x0031
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI 0x0032
+#define regCC_UVD_VCPU_ERR_DETECT_TOP_HI_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR 0x0033
+#define regCC_UVD_VCPU_ERR_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO 0x0034
+#define regCC_UVD_VCPU_ERR_INST_ADDR_LO_BASE_IDX 1
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI 0x0035
+#define regCC_UVD_VCPU_ERR_INST_ADDR_HI_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_SPACE 0x003d
+#define regUVD_LMI_MMSCH_NC_SPACE_BASE_IDX 1
+#define regUVD_LMI_ATOMIC_SPACE 0x003e
+#define regUVD_LMI_ATOMIC_SPACE_BASE_IDX 1
+#define regUVD_GFX8_ADDR_CONFIG 0x0041
+#define regUVD_GFX8_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GFX10_ADDR_CONFIG 0x0042
+#define regUVD_GFX10_ADDR_CONFIG_BASE_IDX 1
+#define regUVD_GPCNT2_CNTL 0x0043
+#define regUVD_GPCNT2_CNTL_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_LOWER 0x0044
+#define regUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_LOWER 0x0045
+#define regUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT2_TARGET_UPPER 0x0046
+#define regUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT2_STATUS_UPPER 0x0047
+#define regUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_CNTL 0x0048
+#define regUVD_GPCNT3_CNTL_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_LOWER 0x0049
+#define regUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_LOWER 0x004a
+#define regUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1
+#define regUVD_GPCNT3_TARGET_UPPER 0x004b
+#define regUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1
+#define regUVD_GPCNT3_STATUS_UPPER 0x004c
+#define regUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1
+#define regUVD_VCLK_DS_CNTL 0x004d
+#define regUVD_VCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_DCLK_DS_CNTL 0x004e
+#define regUVD_DCLK_DS_CNTL_BASE_IDX 1
+#define regUVD_TSC_LOWER 0x004f
+#define regUVD_TSC_LOWER_BASE_IDX 1
+#define regUVD_TSC_UPPER 0x0050
+#define regUVD_TSC_UPPER_BASE_IDX 1
+#define regVCN_FEATURES 0x0051
+#define regVCN_FEATURES_BASE_IDX 1
+#define regUVD_GPUIOV_STATUS 0x0055
+#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057
+#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
+#define regUVD_RAS_MMSCH_FATAL_ERROR 0x0058
+#define regUVD_RAS_MMSCH_FATAL_ERROR_BASE_IDX 1
+#define regUVD_RAS_JPEG0_STATUS 0x0059
+#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1
+#define regUVD_RAS_JPEG1_STATUS 0x005a
+#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1
+#define regUVD_RAS_CNTL_PMI_ARB 0x005b
+#define regUVD_RAS_CNTL_PMI_ARB_BASE_IDX 1
+#define regUVD_SCRATCH15 0x005c
+#define regUVD_SCRATCH15_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL1 0x005d
+#define regVCN_JPEG_DB_CTRL1_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL2 0x005e
+#define regVCN_JPEG_DB_CTRL2_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL3 0x005f
+#define regVCN_JPEG_DB_CTRL3_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL4 0x0060
+#define regVCN_JPEG_DB_CTRL4_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL5 0x0061
+#define regVCN_JPEG_DB_CTRL5_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL6 0x0062
+#define regVCN_JPEG_DB_CTRL6_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL7 0x0063
+#define regVCN_JPEG_DB_CTRL7_BASE_IDX 1
+#define regUVD_SCRATCH32 0x006d
+#define regUVD_SCRATCH32_BASE_IDX 1
+#define regUVD_VERSION 0x006e
+#define regUVD_VERSION_BASE_IDX 1
+#define regVCN_RB_DB_CTRL 0x0070
+#define regVCN_RB_DB_CTRL_BASE_IDX 1
+#define regVCN_JPEG_DB_CTRL 0x0071
+#define regVCN_JPEG_DB_CTRL_BASE_IDX 1
+#define regVCN_RB1_DB_CTRL 0x0072
+#define regVCN_RB1_DB_CTRL_BASE_IDX 1
+#define regVCN_RB2_DB_CTRL 0x0073
+#define regVCN_RB2_DB_CTRL_BASE_IDX 1
+#define regVCN_RB3_DB_CTRL 0x0074
+#define regVCN_RB3_DB_CTRL_BASE_IDX 1
+#define regVCN_RB4_DB_CTRL 0x0075
+#define regVCN_RB4_DB_CTRL_BASE_IDX 1
+#define regVCN_RB_ENABLE 0x0085
+#define regVCN_RB_ENABLE_BASE_IDX 1
+#define regVCN_RB_WPTR_CTRL 0x0086
+#define regVCN_RB_WPTR_CTRL_BASE_IDX 1
+#define regUVD_RB_RPTR 0x00ac
+#define regUVD_RB_RPTR_BASE_IDX 1
+#define regUVD_RB_WPTR 0x00ad
+#define regUVD_RB_WPTR_BASE_IDX 1
+#define regUVD_RB_RPTR2 0x00ae
+#define regUVD_RB_RPTR2_BASE_IDX 1
+#define regUVD_RB_WPTR2 0x00af
+#define regUVD_RB_WPTR2_BASE_IDX 1
+#define regUVD_RB_RPTR3 0x00b0
+#define regUVD_RB_RPTR3_BASE_IDX 1
+#define regUVD_RB_WPTR3 0x00b1
+#define regUVD_RB_WPTR3_BASE_IDX 1
+#define regUVD_RB_RPTR4 0x00b2
+#define regUVD_RB_RPTR4_BASE_IDX 1
+#define regUVD_RB_WPTR4 0x00b3
+#define regUVD_RB_WPTR4_BASE_IDX 1
+#define regUVD_OUT_RB_RPTR 0x00b4
+#define regUVD_OUT_RB_RPTR_BASE_IDX 1
+#define regUVD_OUT_RB_WPTR 0x00b5
+#define regUVD_OUT_RB_WPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_RPTR 0x00b6
+#define regUVD_AUDIO_RB_RPTR_BASE_IDX 1
+#define regUVD_AUDIO_RB_WPTR 0x00b7
+#define regUVD_AUDIO_RB_WPTR_BASE_IDX 1
+#define regUVD_RBC_RB_RPTR 0x00b8
+#define regUVD_RBC_RB_RPTR_BASE_IDX 1
+#define regUVD_RBC_RB_WPTR 0x00b9
+#define regUVD_RBC_RB_WPTR_BASE_IDX 1
+#define regUVD_DPG_LMA_CTL2 0x00bb
+#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_mmsch_dec
+// base address: 0x20d00
+#define regMMSCH_UCODE_ADDR 0x0540
+#define regMMSCH_UCODE_ADDR_BASE_IDX 1
+#define regMMSCH_UCODE_DATA 0x0541
+#define regMMSCH_UCODE_DATA_BASE_IDX 1
+#define regMMSCH_SRAM_ADDR 0x0542
+#define regMMSCH_SRAM_ADDR_BASE_IDX 1
+#define regMMSCH_SRAM_DATA 0x0543
+#define regMMSCH_SRAM_DATA_BASE_IDX 1
+#define regMMSCH_VF_SRAM_OFFSET 0x0544
+#define regMMSCH_VF_SRAM_OFFSET_BASE_IDX 1
+#define regMMSCH_DB_SRAM_OFFSET 0x0545
+#define regMMSCH_DB_SRAM_OFFSET_BASE_IDX 1
+#define regMMSCH_CTX_SRAM_OFFSET 0x0546
+#define regMMSCH_CTX_SRAM_OFFSET_BASE_IDX 1
+#define regMMSCH_CTL 0x0547
+#define regMMSCH_CTL_BASE_IDX 1
+#define regMMSCH_INTR 0x0548
+#define regMMSCH_INTR_BASE_IDX 1
+#define regMMSCH_INTR_ACK 0x0549
+#define regMMSCH_INTR_ACK_BASE_IDX 1
+#define regMMSCH_INTR_STATUS 0x054a
+#define regMMSCH_INTR_STATUS_BASE_IDX 1
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_GPCOM_ADDR_LO 0x054f
+#define regMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_GPCOM_ADDR_HI 0x0550
+#define regMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_GPCOM_SIZE 0x0551
+#define regMMSCH_VF_GPCOM_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_0 0x0554
+#define regMMSCH_VF_MAILBOX_0_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_0_RESP 0x0555
+#define regMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_1 0x0556
+#define regMMSCH_VF_MAILBOX_1_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_1_RESP 0x0557
+#define regMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 1
+#define regMMSCH_CNTL 0x055c
+#define regMMSCH_CNTL_BASE_IDX 1
+#define regMMSCH_NONCACHE_OFFSET0 0x055d
+#define regMMSCH_NONCACHE_OFFSET0_BASE_IDX 1
+#define regMMSCH_NONCACHE_SIZE0 0x055e
+#define regMMSCH_NONCACHE_SIZE0_BASE_IDX 1
+#define regMMSCH_NONCACHE_OFFSET1 0x055f
+#define regMMSCH_NONCACHE_OFFSET1_BASE_IDX 1
+#define regMMSCH_NONCACHE_SIZE1 0x0560
+#define regMMSCH_NONCACHE_SIZE1_BASE_IDX 1
+#define regMMSCH_PROC_STATE1 0x0566
+#define regMMSCH_PROC_STATE1_BASE_IDX 1
+#define regMMSCH_LAST_MC_ADDR 0x0567
+#define regMMSCH_LAST_MC_ADDR_BASE_IDX 1
+#define regMMSCH_LAST_MEM_ACCESS_HI 0x0568
+#define regMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 1
+#define regMMSCH_LAST_MEM_ACCESS_LO 0x0569
+#define regMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 1
+#define regMMSCH_IOV_ACTIVE_FCN_ID 0x056a
+#define regMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regMMSCH_SCRATCH_0 0x056b
+#define regMMSCH_SCRATCH_0_BASE_IDX 1
+#define regMMSCH_SCRATCH_1 0x056c
+#define regMMSCH_SCRATCH_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_0 0x056d
+#define regMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_CONTROL_0 0x056e
+#define regMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_0 0x056f
+#define regMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0570
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0571
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0572
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW6_0 0x0573
+#define regMMSCH_GPUIOV_DW6_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW7_0 0x0574
+#define regMMSCH_GPUIOV_DW7_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW8_0 0x0575
+#define regMMSCH_GPUIOV_DW8_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_1 0x0576
+#define regMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_CONTROL_1 0x0577
+#define regMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_1 0x0578
+#define regMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0579
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_1 0x057a
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x057b
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW6_1 0x057c
+#define regMMSCH_GPUIOV_DW6_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW7_1 0x057d
+#define regMMSCH_GPUIOV_DW7_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW8_1 0x057e
+#define regMMSCH_GPUIOV_DW8_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CNTXT 0x057f
+#define regMMSCH_GPUIOV_CNTXT_BASE_IDX 1
+#define regMMSCH_SCRATCH_2 0x0580
+#define regMMSCH_SCRATCH_2_BASE_IDX 1
+#define regMMSCH_SCRATCH_3 0x0581
+#define regMMSCH_SCRATCH_3_BASE_IDX 1
+#define regMMSCH_SCRATCH_4 0x0582
+#define regMMSCH_SCRATCH_4_BASE_IDX 1
+#define regMMSCH_SCRATCH_5 0x0583
+#define regMMSCH_SCRATCH_5_BASE_IDX 1
+#define regMMSCH_SCRATCH_6 0x0584
+#define regMMSCH_SCRATCH_6_BASE_IDX 1
+#define regMMSCH_SCRATCH_7 0x0585
+#define regMMSCH_SCRATCH_7_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_HEAD_0 0x0586
+#define regMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_TAIL_0 0x0587
+#define regMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_HEAD_1 0x0588
+#define regMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_TAIL_1 0x0589
+#define regMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 1
+#define regMMSCH_NACK_STATUS 0x058a
+#define regMMSCH_NACK_STATUS_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX0_DATA 0x058b
+#define regMMSCH_VF_MAILBOX0_DATA_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX1_DATA 0x058c
+#define regMMSCH_VF_MAILBOX1_DATA_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x058d
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_0 0x058e
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x058f
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0590
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0591
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0592
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 1
+#define regMMSCH_GPUIOV_CNTXT_IP 0x0593
+#define regMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_2 0x0594
+#define regMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_CONTROL_2 0x0595
+#define regMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_2 0x0596
+#define regMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0597
+#define regMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0598
+#define regMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0599
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW6_2 0x059a
+#define regMMSCH_GPUIOV_DW6_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW7_2 0x059b
+#define regMMSCH_GPUIOV_DW7_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_DW8_2 0x059c
+#define regMMSCH_GPUIOV_DW8_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x059d
+#define regMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_2 0x059e
+#define regMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 1
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x059f
+#define regMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_HEAD_2 0x05a0
+#define regMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 1
+#define regMMSCH_VFID_FIFO_TAIL_2 0x05a1
+#define regMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 1
+#define regMMSCH_VM_BUSY_STATUS_0 0x05a2
+#define regMMSCH_VM_BUSY_STATUS_0_BASE_IDX 1
+#define regMMSCH_VM_BUSY_STATUS_1 0x05a3
+#define regMMSCH_VM_BUSY_STATUS_1_BASE_IDX 1
+#define regMMSCH_VM_BUSY_STATUS_2 0x05a4
+#define regMMSCH_VM_BUSY_STATUS_2_BASE_IDX 1
+
+
+// addressBlock: aid_uvd0_slmi_adpdec
+// base address: 0x21c00
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x0900
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x0901
+#define regUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x0902
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0903
+#define regUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0904
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0905
+#define regUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0906
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0907
+#define regUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0908
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0909
+#define regUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x090a
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x090b
+#define regUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x090c
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x090d
+#define regUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x090e
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x090f
+#define regUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1
+#define regUVD_LMI_MMSCH_NC_VMID 0x0910
+#define regUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1
+#define regUVD_LMI_MMSCH_CTRL 0x0911
+#define regUVD_LMI_MMSCH_CTRL_BASE_IDX 1
+#define regUVD_MMSCH_LMI_STATUS 0x0912
+#define regUVD_MMSCH_LMI_STATUS_BASE_IDX 1
+#define regVCN_RAS_CNTL_MMSCH 0x0914
+#define regVCN_RAS_CNTL_MMSCH_BASE_IDX 1
+
+// addressBlock: aid_uvd0_vcn_edcc_dec
+// base address: 0x21d20
+#define regVCN_UE_ERR_STATUS_LO_VIDD 0x094c
+#define regVCN_UE_ERR_STATUS_LO_VIDD_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_VIDD 0x094d
+#define regVCN_UE_ERR_STATUS_HI_VIDD_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_VIDV 0x094e
+#define regVCN_UE_ERR_STATUS_LO_VIDV_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_VIDV 0x094f
+#define regVCN_UE_ERR_STATUS_HI_VIDV_BASE_IDX 1
+#define regVCN_CE_ERR_STATUS_LO_MMSCHD 0x0950
+#define regVCN_CE_ERR_STATUS_LO_MMSCHD_BASE_IDX 1
+#define regVCN_CE_ERR_STATUS_HI_MMSCHD 0x0951
+#define regVCN_CE_ERR_STATUS_HI_MMSCHD_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG0S 0x0952
+#define regVCN_UE_ERR_STATUS_LO_JPEG0S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG0S 0x0953
+#define regVCN_UE_ERR_STATUS_HI_JPEG0S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG0D 0x0954
+#define regVCN_UE_ERR_STATUS_LO_JPEG0D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG0D 0x0955
+#define regVCN_UE_ERR_STATUS_HI_JPEG0D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG1S 0x0956
+#define regVCN_UE_ERR_STATUS_LO_JPEG1S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG1S 0x0957
+#define regVCN_UE_ERR_STATUS_HI_JPEG1S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG1D 0x0958
+#define regVCN_UE_ERR_STATUS_LO_JPEG1D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG1D 0x0959
+#define regVCN_UE_ERR_STATUS_HI_JPEG1D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG2S 0x095a
+#define regVCN_UE_ERR_STATUS_LO_JPEG2S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG2S 0x095b
+#define regVCN_UE_ERR_STATUS_HI_JPEG2S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG2D 0x095c
+#define regVCN_UE_ERR_STATUS_LO_JPEG2D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG2D 0x095d
+#define regVCN_UE_ERR_STATUS_HI_JPEG2D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG3S 0x095e
+#define regVCN_UE_ERR_STATUS_LO_JPEG3S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG3S 0x095f
+#define regVCN_UE_ERR_STATUS_HI_JPEG3S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG3D 0x0960
+#define regVCN_UE_ERR_STATUS_LO_JPEG3D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG3D 0x0961
+#define regVCN_UE_ERR_STATUS_HI_JPEG3D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG4S 0x0962
+#define regVCN_UE_ERR_STATUS_LO_JPEG4S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG4S 0x0963
+#define regVCN_UE_ERR_STATUS_HI_JPEG4S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG4D 0x0964
+#define regVCN_UE_ERR_STATUS_LO_JPEG4D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG4D 0x0965
+#define regVCN_UE_ERR_STATUS_HI_JPEG4D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG5S 0x0966
+#define regVCN_UE_ERR_STATUS_LO_JPEG5S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG5S 0x0967
+#define regVCN_UE_ERR_STATUS_HI_JPEG5S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG5D 0x0968
+#define regVCN_UE_ERR_STATUS_LO_JPEG5D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG5D 0x0969
+#define regVCN_UE_ERR_STATUS_HI_JPEG5D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG6S 0x096a
+#define regVCN_UE_ERR_STATUS_LO_JPEG6S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG6S 0x096b
+#define regVCN_UE_ERR_STATUS_HI_JPEG6S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG6D 0x096c
+#define regVCN_UE_ERR_STATUS_LO_JPEG6D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG6D 0x096d
+#define regVCN_UE_ERR_STATUS_HI_JPEG6D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG7S 0x096e
+#define regVCN_UE_ERR_STATUS_LO_JPEG7S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG7S 0x096f
+#define regVCN_UE_ERR_STATUS_HI_JPEG7S_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_LO_JPEG7D 0x0970
+#define regVCN_UE_ERR_STATUS_LO_JPEG7D_BASE_IDX 1
+#define regVCN_UE_ERR_STATUS_HI_JPEG7D 0x0971
+#define regVCN_UE_ERR_STATUS_HI_JPEG7D_BASE_IDX 1
+
+// addressBlock: aid_uvd0_uvd_jrbc1_uvd_jrbc_dec
+// base address: 0x1e000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR 0x0000
+#define regUVD_JRBC1_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_CNTL 0x0001
+#define regUVD_JRBC1_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE 0x0002
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_URGENT_CNTL 0x0003
+#define regUVD_JRBC1_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_REF_DATA 0x0004
+#define regUVD_JRBC1_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER 0x0005
+#define regUVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_SOFT_RESET 0x0008
+#define regUVD_JRBC1_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_STATUS 0x0009
+#define regUVD_JRBC1_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR 0x000a
+#define regUVD_JRBC1_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_BUF_STATUS 0x000b
+#define regUVD_JRBC1_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_BUF_STATUS 0x000c
+#define regUVD_JRBC1_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE 0x000d
+#define regUVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER 0x000e
+#define regUVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_IB_REF_DATA 0x000f
+#define regUVD_JRBC1_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_CMD 0x0010
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0011
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0012
+#define regUVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_RB_SIZE 0x0013
+#define regUVD_JRBC1_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC1_UVD_JRBC_SCRATCH0 0x0014
+#define regUVD_JRBC1_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc2_uvd_jrbc_dec
+// base address: 0x1e100
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR 0x0040
+#define regUVD_JRBC2_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_CNTL 0x0041
+#define regUVD_JRBC2_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE 0x0042
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_URGENT_CNTL 0x0043
+#define regUVD_JRBC2_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_REF_DATA 0x0044
+#define regUVD_JRBC2_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER 0x0045
+#define regUVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_SOFT_RESET 0x0048
+#define regUVD_JRBC2_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_STATUS 0x0049
+#define regUVD_JRBC2_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR 0x004a
+#define regUVD_JRBC2_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_BUF_STATUS 0x004b
+#define regUVD_JRBC2_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_BUF_STATUS 0x004c
+#define regUVD_JRBC2_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE 0x004d
+#define regUVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER 0x004e
+#define regUVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_IB_REF_DATA 0x004f
+#define regUVD_JRBC2_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_CMD 0x0050
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0051
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0052
+#define regUVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_RB_SIZE 0x0053
+#define regUVD_JRBC2_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC2_UVD_JRBC_SCRATCH0 0x0054
+#define regUVD_JRBC2_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc3_uvd_jrbc_dec
+// base address: 0x1e200
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR 0x0080
+#define regUVD_JRBC3_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_CNTL 0x0081
+#define regUVD_JRBC3_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE 0x0082
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_URGENT_CNTL 0x0083
+#define regUVD_JRBC3_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_REF_DATA 0x0084
+#define regUVD_JRBC3_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER 0x0085
+#define regUVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_SOFT_RESET 0x0088
+#define regUVD_JRBC3_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_STATUS 0x0089
+#define regUVD_JRBC3_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR 0x008a
+#define regUVD_JRBC3_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_BUF_STATUS 0x008b
+#define regUVD_JRBC3_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_BUF_STATUS 0x008c
+#define regUVD_JRBC3_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE 0x008d
+#define regUVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER 0x008e
+#define regUVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_IB_REF_DATA 0x008f
+#define regUVD_JRBC3_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_CMD 0x0090
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0091
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0092
+#define regUVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_RB_SIZE 0x0093
+#define regUVD_JRBC3_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC3_UVD_JRBC_SCRATCH0 0x0094
+#define regUVD_JRBC3_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc4_uvd_jrbc_dec
+// base address: 0x1e300
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR 0x00c0
+#define regUVD_JRBC4_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_CNTL 0x00c1
+#define regUVD_JRBC4_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE 0x00c2
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_URGENT_CNTL 0x00c3
+#define regUVD_JRBC4_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_REF_DATA 0x00c4
+#define regUVD_JRBC4_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER 0x00c5
+#define regUVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_SOFT_RESET 0x00c8
+#define regUVD_JRBC4_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_STATUS 0x00c9
+#define regUVD_JRBC4_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR 0x00ca
+#define regUVD_JRBC4_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_BUF_STATUS 0x00cb
+#define regUVD_JRBC4_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_BUF_STATUS 0x00cc
+#define regUVD_JRBC4_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE 0x00cd
+#define regUVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER 0x00ce
+#define regUVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_IB_REF_DATA 0x00cf
+#define regUVD_JRBC4_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_CMD 0x00d0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0 0x00d1
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1 0x00d2
+#define regUVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_RB_SIZE 0x00d3
+#define regUVD_JRBC4_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC4_UVD_JRBC_SCRATCH0 0x00d4
+#define regUVD_JRBC4_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc5_uvd_jrbc_dec
+// base address: 0x1e400
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR 0x0100
+#define regUVD_JRBC5_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_CNTL 0x0101
+#define regUVD_JRBC5_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_IB_SIZE 0x0102
+#define regUVD_JRBC5_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_URGENT_CNTL 0x0103
+#define regUVD_JRBC5_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_REF_DATA 0x0104
+#define regUVD_JRBC5_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER 0x0105
+#define regUVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_SOFT_RESET 0x0108
+#define regUVD_JRBC5_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_STATUS 0x0109
+#define regUVD_JRBC5_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR 0x010a
+#define regUVD_JRBC5_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_BUF_STATUS 0x010b
+#define regUVD_JRBC5_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_IB_BUF_STATUS 0x010c
+#define regUVD_JRBC5_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE 0x010d
+#define regUVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER 0x010e
+#define regUVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_IB_REF_DATA 0x010f
+#define regUVD_JRBC5_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JPEG_PREEMPT_CMD 0x0110
+#define regUVD_JRBC5_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0111
+#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0112
+#define regUVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_RB_SIZE 0x0113
+#define regUVD_JRBC5_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC5_UVD_JRBC_SCRATCH0 0x0114
+#define regUVD_JRBC5_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc6_uvd_jrbc_dec
+// base address: 0x1e500
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR 0x0140
+#define regUVD_JRBC6_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_CNTL 0x0141
+#define regUVD_JRBC6_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_IB_SIZE 0x0142
+#define regUVD_JRBC6_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_URGENT_CNTL 0x0143
+#define regUVD_JRBC6_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_REF_DATA 0x0144
+#define regUVD_JRBC6_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER 0x0145
+#define regUVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_SOFT_RESET 0x0148
+#define regUVD_JRBC6_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_STATUS 0x0149
+#define regUVD_JRBC6_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR 0x014a
+#define regUVD_JRBC6_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_BUF_STATUS 0x014b
+#define regUVD_JRBC6_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_IB_BUF_STATUS 0x014c
+#define regUVD_JRBC6_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE 0x014d
+#define regUVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER 0x014e
+#define regUVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_IB_REF_DATA 0x014f
+#define regUVD_JRBC6_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JPEG_PREEMPT_CMD 0x0150
+#define regUVD_JRBC6_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0151
+#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0152
+#define regUVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_RB_SIZE 0x0153
+#define regUVD_JRBC6_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC6_UVD_JRBC_SCRATCH0 0x0154
+#define regUVD_JRBC6_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jrbc7_uvd_jrbc_dec
+// base address: 0x1e600
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR 0x0180
+#define regUVD_JRBC7_UVD_JRBC_RB_WPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_CNTL 0x0181
+#define regUVD_JRBC7_UVD_JRBC_RB_CNTL_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_IB_SIZE 0x0182
+#define regUVD_JRBC7_UVD_JRBC_IB_SIZE_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_URGENT_CNTL 0x0183
+#define regUVD_JRBC7_UVD_JRBC_URGENT_CNTL_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_REF_DATA 0x0184
+#define regUVD_JRBC7_UVD_JRBC_RB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER 0x0185
+#define regUVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_SOFT_RESET 0x0188
+#define regUVD_JRBC7_UVD_JRBC_SOFT_RESET_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_STATUS 0x0189
+#define regUVD_JRBC7_UVD_JRBC_STATUS_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR 0x018a
+#define regUVD_JRBC7_UVD_JRBC_RB_RPTR_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_BUF_STATUS 0x018b
+#define regUVD_JRBC7_UVD_JRBC_RB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_IB_BUF_STATUS 0x018c
+#define regUVD_JRBC7_UVD_JRBC_IB_BUF_STATUS_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE 0x018d
+#define regUVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER 0x018e
+#define regUVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_IB_REF_DATA 0x018f
+#define regUVD_JRBC7_UVD_JRBC_IB_REF_DATA_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JPEG_PREEMPT_CMD 0x0190
+#define regUVD_JRBC7_UVD_JPEG_PREEMPT_CMD_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0 0x0191
+#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1 0x0192
+#define regUVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_RB_SIZE 0x0193
+#define regUVD_JRBC7_UVD_JRBC_RB_SIZE_BASE_IDX 0
+#define regUVD_JRBC7_UVD_JRBC_SCRATCH0 0x0194
+#define regUVD_JRBC7_UVD_JRBC_SCRATCH0_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi1_uvd_jmi_dec
+// base address: 0x1e080
+#define regUVD_JMI1_UVD_JPEG_DEC_PF_CTRL 0x0020
+#define regUVD_JMI1_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_CTRL 0x0021
+#define regUVD_JMI1_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_CTRL 0x0022
+#define regUVD_JMI1_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI1_JPEG_LMI_DROP 0x0023
+#define regUVD_JMI1_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_VMID 0x0024
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_VMID 0x0025
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_VMID 0x0026
+#define regUVD_JMI1_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0027
+#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0028
+#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0029
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x002a
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x002b
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x002c
+#define regUVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID 0x002d
+#define regUVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_DEC_SWAP_CNTL 0x002e
+#define regUVD_JMI1_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL 0x002f
+#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0030
+#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0031
+#define regUVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0032
+#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0033
+#define regUVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0034
+#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0035
+#define regUVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0036
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0037
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0038
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0039
+#define regUVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2 0x003d
+#define regUVD_JMI1_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi2_uvd_jmi_dec
+// base address: 0x1e180
+#define regUVD_JMI2_UVD_JPEG_DEC_PF_CTRL 0x0060
+#define regUVD_JMI2_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_CTRL 0x0061
+#define regUVD_JMI2_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_CTRL 0x0062
+#define regUVD_JMI2_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI2_JPEG_LMI_DROP 0x0063
+#define regUVD_JMI2_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_VMID 0x0064
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_VMID 0x0065
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_VMID 0x0066
+#define regUVD_JMI2_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0067
+#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0068
+#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0069
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x006a
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x006b
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x006c
+#define regUVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID 0x006d
+#define regUVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_DEC_SWAP_CNTL 0x006e
+#define regUVD_JMI2_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL 0x006f
+#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0070
+#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0071
+#define regUVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0072
+#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0073
+#define regUVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0074
+#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0075
+#define regUVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0076
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0077
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0078
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0079
+#define regUVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2 0x007d
+#define regUVD_JMI2_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi3_uvd_jmi_dec
+// base address: 0x1e280
+#define regUVD_JMI3_UVD_JPEG_DEC_PF_CTRL 0x00a0
+#define regUVD_JMI3_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_CTRL 0x00a1
+#define regUVD_JMI3_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_CTRL 0x00a2
+#define regUVD_JMI3_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI3_JPEG_LMI_DROP 0x00a3
+#define regUVD_JMI3_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_VMID 0x00a4
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_VMID 0x00a5
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_VMID 0x00a6
+#define regUVD_JMI3_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x00a7
+#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x00a8
+#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x00a9
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x00aa
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x00ab
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x00ac
+#define regUVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID 0x00ad
+#define regUVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_DEC_SWAP_CNTL 0x00ae
+#define regUVD_JMI3_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL 0x00af
+#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x00b0
+#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x00b1
+#define regUVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x00b2
+#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x00b3
+#define regUVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x00b4
+#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x00b5
+#define regUVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x00b6
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x00b7
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x00b8
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x00b9
+#define regUVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2 0x00bd
+#define regUVD_JMI3_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi4_uvd_jmi_dec
+// base address: 0x1e380
+#define regUVD_JMI4_UVD_JPEG_DEC_PF_CTRL 0x00e0
+#define regUVD_JMI4_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_CTRL 0x00e1
+#define regUVD_JMI4_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_CTRL 0x00e2
+#define regUVD_JMI4_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI4_JPEG_LMI_DROP 0x00e3
+#define regUVD_JMI4_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_VMID 0x00e4
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_VMID 0x00e5
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_VMID 0x00e6
+#define regUVD_JMI4_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x00e7
+#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x00e8
+#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x00e9
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x00ea
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x00eb
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x00ec
+#define regUVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID 0x00ed
+#define regUVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_DEC_SWAP_CNTL 0x00ee
+#define regUVD_JMI4_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL 0x00ef
+#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x00f0
+#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x00f1
+#define regUVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x00f2
+#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x00f3
+#define regUVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x00f4
+#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x00f5
+#define regUVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x00f6
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x00f7
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x00f8
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x00f9
+#define regUVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2 0x00fd
+#define regUVD_JMI4_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi5_uvd_jmi_dec
+// base address: 0x1e480
+#define regUVD_JMI5_UVD_JPEG_DEC_PF_CTRL 0x0120
+#define regUVD_JMI5_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_CTRL 0x0121
+#define regUVD_JMI5_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_CTRL 0x0122
+#define regUVD_JMI5_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI5_JPEG_LMI_DROP 0x0123
+#define regUVD_JMI5_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_VMID 0x0124
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_VMID 0x0125
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_VMID 0x0126
+#define regUVD_JMI5_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0127
+#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0128
+#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0129
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x012a
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x012b
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x012c
+#define regUVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID 0x012d
+#define regUVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_DEC_SWAP_CNTL 0x012e
+#define regUVD_JMI5_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL 0x012f
+#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0130
+#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0131
+#define regUVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0132
+#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0133
+#define regUVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0134
+#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0135
+#define regUVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0136
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0137
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0138
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0139
+#define regUVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2 0x013d
+#define regUVD_JMI5_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi6_uvd_jmi_dec
+// base address: 0x1e580
+#define regUVD_JMI6_UVD_JPEG_DEC_PF_CTRL 0x0160
+#define regUVD_JMI6_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_CTRL 0x0161
+#define regUVD_JMI6_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_CTRL 0x0162
+#define regUVD_JMI6_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI6_JPEG_LMI_DROP 0x0163
+#define regUVD_JMI6_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_VMID 0x0164
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_VMID 0x0165
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_VMID 0x0166
+#define regUVD_JMI6_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0167
+#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0168
+#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0169
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x016a
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x016b
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x016c
+#define regUVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID 0x016d
+#define regUVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_DEC_SWAP_CNTL 0x016e
+#define regUVD_JMI6_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL 0x016f
+#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x0170
+#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x0171
+#define regUVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0172
+#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0173
+#define regUVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0174
+#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0175
+#define regUVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0176
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0177
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0178
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0179
+#define regUVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2 0x017d
+#define regUVD_JMI6_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: aid_uvd0_uvd_jmi7_uvd_jmi_dec
+// base address: 0x1e680
+#define regUVD_JMI7_UVD_JPEG_DEC_PF_CTRL 0x01a0
+#define regUVD_JMI7_UVD_JPEG_DEC_PF_CTRL_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_CTRL 0x01a1
+#define regUVD_JMI7_UVD_LMI_JRBC_CTRL_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_CTRL 0x01a2
+#define regUVD_JMI7_UVD_LMI_JPEG_CTRL_BASE_IDX 0
+#define regUVD_JMI7_JPEG_LMI_DROP 0x01a3
+#define regUVD_JMI7_JPEG_LMI_DROP_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_VMID 0x01a4
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_VMID_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_VMID 0x01a5
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_VMID_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_VMID 0x01a6
+#define regUVD_JMI7_UVD_LMI_JPEG_VMID_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x01a7
+#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x01a8
+#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x01a9
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x01aa
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x01ab
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x01ac
+#define regUVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID 0x01ad
+#define regUVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_DEC_SWAP_CNTL 0x01ae
+#define regUVD_JMI7_UVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL 0x01af
+#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW 0x01b0
+#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH 0x01b1
+#define regUVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x01b2
+#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x01b3
+#define regUVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x01b4
+#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x01b5
+#define regUVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x01b6
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x01b7
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x01b8
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x01b9
+#define regUVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0
+#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2 0x01bd
+#define regUVD_JMI7_UVD_JMI_ATOMIC_CNTL2_BASE_IDX 0
+
+
+// addressBlock: uvdctxind
+// base address: 0x0
+#define ixUVD_CGC_MEM_CTRL 0x0000
+#define ixUVD_CGC_CTRL2 0x0001
+#define ixUVD_CGC_MEM_DS_CTRL 0x0002
+#define ixUVD_CGC_MEM_SD_CTRL 0x0003
+#define ixUVD_SW_SCRATCH_00 0x0004
+#define ixUVD_SW_SCRATCH_01 0x0005
+#define ixUVD_SW_SCRATCH_02 0x0006
+#define ixUVD_SW_SCRATCH_03 0x0007
+#define ixUVD_SW_SCRATCH_04 0x0008
+#define ixUVD_SW_SCRATCH_05 0x0009
+#define ixUVD_SW_SCRATCH_06 0x000a
+#define ixUVD_SW_SCRATCH_07 0x000b
+#define ixUVD_SW_SCRATCH_08 0x000c
+#define ixUVD_SW_SCRATCH_09 0x000d
+#define ixUVD_SW_SCRATCH_10 0x000e
+#define ixUVD_SW_SCRATCH_11 0x000f
+#define ixUVD_SW_SCRATCH_12 0x0010
+#define ixUVD_SW_SCRATCH_13 0x0011
+#define ixUVD_SW_SCRATCH_14 0x0012
+#define ixUVD_SW_SCRATCH_15 0x0013
+#define ixUVD_IH_SEM_CTRL 0x001e
+
+
+// addressBlock: lmi_adp_indirect
+// base address: 0x0
+#define ixUVD_LMI_CRC0 0x0000
+#define ixUVD_LMI_CRC1 0x0001
+#define ixUVD_LMI_CRC2 0x0002
+#define ixUVD_LMI_CRC3 0x0003
+#define ixUVD_LMI_CRC10 0x000a
+#define ixUVD_LMI_CRC11 0x000b
+#define ixUVD_LMI_CRC12 0x000c
+#define ixUVD_LMI_CRC13 0x000d
+#define ixUVD_LMI_CRC14 0x000e
+#define ixUVD_LMI_CRC15 0x000f
+#define ixUVD_LMI_SWAP_CNTL2 0x0029
+#define ixUVD_MEMCHECK_SYS_INT_EN 0x0134
+#define ixUVD_MEMCHECK_SYS_INT_STAT 0x0135
+#define ixUVD_MEMCHECK_SYS_INT_ACK 0x0136
+#define ixUVD_MEMCHECK_VCPU_INT_EN 0x0137
+#define ixUVD_MEMCHECK_VCPU_INT_STAT 0x0138
+#define ixUVD_MEMCHECK_VCPU_INT_ACK 0x0139
+#define ixUVD_MEMCHECK2_SYS_INT_STAT 0x0140
+#define ixUVD_MEMCHECK2_SYS_INT_ACK 0x0141
+#define ixUVD_MEMCHECK2_VCPU_INT_STAT 0x0142
+#define ixUVD_MEMCHECK2_VCPU_INT_ACK 0x0143
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h
new file mode 100644
index 000000000000..5bd8111bf04a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h
@@ -0,0 +1,10919 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _vcn_4_0_3_SH_MASK_HEADER
+#define _vcn_4_0_3_SH_MASK_HEADER
+
+
+// addressBlock: aid_uvd0_uvddec
+//UVD_TOP_CTRL
+#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0
+#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4
+#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL
+#define UVD_TOP_CTRL__STD_VERSION_MASK 0x00000010L
+//UVD_CGC_GATE
+#define UVD_CGC_GATE__SYS__SHIFT 0x0
+#define UVD_CGC_GATE__UDEC__SHIFT 0x1
+#define UVD_CGC_GATE__MPEG2__SHIFT 0x2
+#define UVD_CGC_GATE__REGS__SHIFT 0x3
+#define UVD_CGC_GATE__RBC__SHIFT 0x4
+#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5
+#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6
+#define UVD_CGC_GATE__IDCT__SHIFT 0x7
+#define UVD_CGC_GATE__MPRD__SHIFT 0x8
+#define UVD_CGC_GATE__MPC__SHIFT 0x9
+#define UVD_CGC_GATE__LBSI__SHIFT 0xa
+#define UVD_CGC_GATE__LRBBM__SHIFT 0xb
+#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc
+#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd
+#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe
+#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf
+#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10
+#define UVD_CGC_GATE__WCB__SHIFT 0x11
+#define UVD_CGC_GATE__VCPU__SHIFT 0x12
+#define UVD_CGC_GATE__MMSCH__SHIFT 0x14
+#define UVD_CGC_GATE__LCM0__SHIFT 0x15
+#define UVD_CGC_GATE__LCM1__SHIFT 0x16
+#define UVD_CGC_GATE__MIF__SHIFT 0x17
+#define UVD_CGC_GATE__VREG__SHIFT 0x18
+#define UVD_CGC_GATE__PE__SHIFT 0x19
+#define UVD_CGC_GATE__PPU__SHIFT 0x1a
+#define UVD_CGC_GATE__SYS_MASK 0x00000001L
+#define UVD_CGC_GATE__UDEC_MASK 0x00000002L
+#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L
+#define UVD_CGC_GATE__REGS_MASK 0x00000008L
+#define UVD_CGC_GATE__RBC_MASK 0x00000010L
+#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L
+#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L
+#define UVD_CGC_GATE__IDCT_MASK 0x00000080L
+#define UVD_CGC_GATE__MPRD_MASK 0x00000100L
+#define UVD_CGC_GATE__MPC_MASK 0x00000200L
+#define UVD_CGC_GATE__LBSI_MASK 0x00000400L
+#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L
+#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L
+#define UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L
+#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L
+#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L
+#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L
+#define UVD_CGC_GATE__WCB_MASK 0x00020000L
+#define UVD_CGC_GATE__VCPU_MASK 0x00040000L
+#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L
+#define UVD_CGC_GATE__LCM0_MASK 0x00200000L
+#define UVD_CGC_GATE__LCM1_MASK 0x00400000L
+#define UVD_CGC_GATE__MIF_MASK 0x00800000L
+#define UVD_CGC_GATE__VREG_MASK 0x01000000L
+#define UVD_CGC_GATE__PE_MASK 0x02000000L
+#define UVD_CGC_GATE__PPU_MASK 0x04000000L
+//UVD_CGC_CTRL
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2
+#define UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6
+#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11
+#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12
+#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13
+#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14
+#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15
+#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16
+#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17
+#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18
+#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19
+#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a
+#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b
+#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c
+#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d
+#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f
+#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL
+#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L
+#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L
+#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L
+#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L
+#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L
+#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L
+#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L
+#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L
+#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L
+#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L
+#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L
+#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L
+#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L
+#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L
+#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L
+#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L
+//AVM_SUVD_CGC_GATE
+#define AVM_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define AVM_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define AVM_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define AVM_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define AVM_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define AVM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define AVM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define AVM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define AVM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define AVM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define AVM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define AVM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define AVM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define AVM_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define AVM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define AVM_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define AVM_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define AVM_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define AVM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define AVM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define AVM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define AVM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define AVM_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define AVM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define AVM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define AVM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define AVM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define AVM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define AVM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define AVM_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define AVM_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define AVM_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define AVM_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define AVM_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define AVM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define AVM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define AVM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define AVM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define AVM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define AVM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define AVM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define AVM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define AVM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define AVM_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define AVM_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define AVM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define AVM_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define AVM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define AVM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define AVM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define AVM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define AVM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define AVM_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define AVM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define AVM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define AVM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define AVM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define AVM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define AVM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//CDEFE_SUVD_CGC_GATE
+#define CDEFE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define CDEFE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define CDEFE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define CDEFE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define CDEFE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define CDEFE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define CDEFE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define CDEFE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define CDEFE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define CDEFE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define CDEFE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define CDEFE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define CDEFE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define CDEFE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define CDEFE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define CDEFE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define CDEFE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define CDEFE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define CDEFE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define CDEFE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define CDEFE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define CDEFE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define CDEFE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define CDEFE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define CDEFE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define CDEFE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define CDEFE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define CDEFE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define CDEFE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define CDEFE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define CDEFE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define CDEFE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define CDEFE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define CDEFE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define CDEFE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define CDEFE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define CDEFE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define CDEFE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define CDEFE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define CDEFE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define CDEFE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define CDEFE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define CDEFE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define CDEFE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define CDEFE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define CDEFE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define CDEFE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define CDEFE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//EFC_SUVD_CGC_GATE
+#define EFC_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define EFC_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define EFC_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define EFC_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define EFC_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define EFC_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define EFC_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define EFC_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define EFC_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define EFC_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define EFC_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define EFC_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define EFC_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define EFC_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define EFC_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define EFC_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define EFC_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define EFC_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define EFC_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define EFC_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define EFC_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define EFC_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define EFC_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define EFC_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define EFC_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define EFC_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define EFC_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define EFC_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define EFC_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define EFC_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define EFC_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define EFC_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define EFC_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define EFC_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define EFC_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define EFC_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define EFC_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define EFC_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define EFC_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define EFC_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define EFC_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define EFC_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define EFC_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define EFC_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define EFC_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define EFC_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define EFC_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define EFC_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define EFC_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define EFC_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define EFC_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define EFC_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define EFC_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define EFC_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define EFC_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define EFC_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define EFC_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define EFC_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define EFC_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//ENT_SUVD_CGC_GATE
+#define ENT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define ENT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define ENT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define ENT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define ENT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define ENT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define ENT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define ENT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define ENT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define ENT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define ENT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define ENT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define ENT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define ENT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define ENT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define ENT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define ENT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define ENT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define ENT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define ENT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define ENT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define ENT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define ENT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define ENT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define ENT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define ENT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define ENT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define ENT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define ENT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define ENT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define ENT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define ENT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define ENT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define ENT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define ENT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define ENT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define ENT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define ENT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define ENT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define ENT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define ENT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define ENT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define ENT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define ENT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define ENT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define ENT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define ENT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define ENT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define ENT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define ENT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define ENT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define ENT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define ENT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define ENT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define ENT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define ENT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define ENT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define ENT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define ENT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//IME_SUVD_CGC_GATE
+#define IME_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define IME_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define IME_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define IME_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define IME_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define IME_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define IME_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define IME_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define IME_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define IME_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define IME_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define IME_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define IME_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define IME_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define IME_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define IME_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define IME_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define IME_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define IME_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define IME_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define IME_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define IME_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define IME_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define IME_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define IME_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define IME_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define IME_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define IME_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define IME_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define IME_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define IME_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define IME_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define IME_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define IME_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define IME_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define IME_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define IME_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define IME_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define IME_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define IME_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define IME_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define IME_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define IME_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define IME_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define IME_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define IME_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define IME_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define IME_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define IME_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define IME_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define IME_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define IME_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define IME_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define IME_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define IME_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define IME_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define IME_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define IME_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define IME_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define IME_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//PPU_SUVD_CGC_GATE
+#define PPU_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define PPU_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define PPU_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define PPU_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define PPU_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define PPU_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define PPU_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define PPU_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define PPU_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define PPU_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define PPU_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define PPU_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define PPU_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define PPU_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define PPU_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define PPU_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define PPU_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define PPU_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define PPU_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define PPU_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define PPU_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define PPU_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define PPU_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define PPU_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define PPU_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define PPU_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define PPU_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define PPU_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define PPU_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define PPU_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define PPU_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define PPU_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define PPU_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define PPU_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define PPU_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define PPU_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define PPU_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define PPU_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define PPU_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define PPU_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define PPU_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define PPU_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define PPU_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define PPU_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define PPU_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define PPU_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define PPU_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define PPU_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define PPU_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define PPU_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define PPU_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define PPU_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define PPU_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define PPU_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define PPU_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define PPU_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define PPU_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define PPU_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define PPU_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SAOE_SUVD_CGC_GATE
+#define SAOE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SAOE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SAOE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SAOE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SAOE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SAOE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SAOE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SAOE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SAOE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SAOE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SAOE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SAOE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SAOE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SAOE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SAOE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SAOE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SAOE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SAOE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SAOE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SAOE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SAOE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SAOE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SAOE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SAOE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SAOE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SAOE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SAOE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SAOE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SAOE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SAOE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SAOE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SAOE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SAOE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SAOE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SAOE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SAOE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SAOE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SAOE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SAOE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SAOE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SAOE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SAOE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SAOE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SAOE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SAOE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SAOE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SAOE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SAOE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SAOE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SAOE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SAOE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SAOE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SAOE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SAOE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SAOE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SAOE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SAOE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SAOE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SCM_SUVD_CGC_GATE
+#define SCM_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SCM_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SCM_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SCM_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SCM_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SCM_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SCM_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SCM_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SCM_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SCM_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SCM_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SCM_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SCM_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SCM_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SCM_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SCM_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SCM_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SCM_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SCM_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SCM_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SCM_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SCM_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SCM_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SCM_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SCM_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SCM_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SCM_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SCM_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SCM_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SCM_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SCM_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SCM_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SCM_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SCM_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SCM_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SCM_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SCM_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SCM_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SCM_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SCM_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SCM_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SCM_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SCM_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SCM_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SCM_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SCM_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SCM_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SCM_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SCM_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SCM_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SCM_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SCM_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SCM_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SCM_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SCM_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SCM_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SCM_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SCM_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SCM_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SDB_SUVD_CGC_GATE
+#define SDB_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SDB_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SDB_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SDB_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SDB_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SDB_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SDB_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SDB_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SDB_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SDB_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SDB_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SDB_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SDB_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SDB_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SDB_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SDB_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SDB_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SDB_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SDB_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SDB_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SDB_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SDB_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SDB_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SDB_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SDB_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SDB_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SDB_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SDB_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SDB_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SDB_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SDB_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SDB_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SDB_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SDB_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SDB_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SDB_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SDB_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SDB_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SDB_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SDB_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SDB_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SDB_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SDB_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SDB_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SDB_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SDB_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SDB_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SDB_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SDB_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SDB_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SDB_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SDB_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SDB_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SDB_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SDB_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SDB_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SDB_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SDB_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SDB_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT0_NXT_SUVD_CGC_GATE
+#define SIT0_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT0_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT0_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT0_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT0_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT0_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT0_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT0_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT0_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT0_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT0_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT0_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT0_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT0_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT0_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT0_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT0_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT0_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT1_NXT_SUVD_CGC_GATE
+#define SIT1_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT1_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT1_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT1_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT1_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT1_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT1_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT1_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT1_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT1_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT1_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT1_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT1_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT1_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT1_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT1_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT1_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT1_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT2_NXT_SUVD_CGC_GATE
+#define SIT2_NXT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT2_NXT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT2_NXT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT2_NXT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT2_NXT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT2_NXT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT2_NXT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT2_NXT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT2_NXT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT2_NXT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT2_NXT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT2_NXT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT2_NXT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT2_NXT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT2_NXT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT2_NXT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT2_NXT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT2_NXT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SIT_SUVD_CGC_GATE
+#define SIT_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SIT_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SIT_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SIT_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SIT_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SIT_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SIT_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SIT_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SIT_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SIT_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SIT_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SIT_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SIT_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SIT_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SIT_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SIT_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SIT_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SIT_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SIT_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SIT_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SIT_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SIT_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SIT_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SIT_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SIT_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SIT_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SIT_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SIT_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SIT_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SIT_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SIT_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SIT_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SIT_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SIT_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SIT_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SIT_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SIT_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SIT_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SIT_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SIT_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SIT_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SIT_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SIT_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SIT_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SIT_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SIT_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SIT_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SIT_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SIT_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SIT_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SIT_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SIT_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SIT_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SIT_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SIT_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SIT_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SIT_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SIT_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SIT_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SMPA_SUVD_CGC_GATE
+#define SMPA_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SMPA_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SMPA_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SMPA_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SMPA_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SMPA_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SMPA_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SMPA_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SMPA_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SMPA_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SMPA_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SMPA_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SMPA_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SMPA_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SMPA_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SMPA_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SMPA_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SMPA_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SMPA_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SMPA_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SMPA_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SMPA_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SMPA_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SMPA_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SMPA_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SMPA_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SMPA_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SMPA_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SMPA_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SMPA_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SMPA_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SMPA_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SMPA_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SMPA_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SMPA_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SMPA_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SMPA_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SMPA_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SMPA_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SMPA_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SMPA_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SMPA_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SMPA_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SMPA_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SMPA_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SMPA_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SMPA_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SMPA_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SMPA_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SMPA_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SMPA_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SMPA_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SMPA_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SMPA_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SMPA_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SMPA_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SMPA_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SMPA_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SMP_SUVD_CGC_GATE
+#define SMP_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SMP_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SMP_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SMP_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SMP_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SMP_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SMP_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SMP_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SMP_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SMP_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SMP_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SMP_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SMP_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SMP_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SMP_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SMP_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SMP_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SMP_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SMP_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SMP_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SMP_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SMP_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SMP_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SMP_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SMP_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SMP_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SMP_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SMP_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SMP_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SMP_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SMP_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SMP_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SMP_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SMP_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SMP_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SMP_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SMP_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SMP_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SMP_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SMP_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SMP_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SMP_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SMP_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SMP_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SMP_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SMP_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SMP_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SMP_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SMP_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SMP_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SMP_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SMP_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SMP_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SMP_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SMP_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SMP_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SMP_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SMP_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SMP_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//SRE_SUVD_CGC_GATE
+#define SRE_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define SRE_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define SRE_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define SRE_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define SRE_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define SRE_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define SRE_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define SRE_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define SRE_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define SRE_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define SRE_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define SRE_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define SRE_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define SRE_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define SRE_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define SRE_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define SRE_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define SRE_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define SRE_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define SRE_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define SRE_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define SRE_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define SRE_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define SRE_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define SRE_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define SRE_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define SRE_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define SRE_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define SRE_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define SRE_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define SRE_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define SRE_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define SRE_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define SRE_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define SRE_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define SRE_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define SRE_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define SRE_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define SRE_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define SRE_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define SRE_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define SRE_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define SRE_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define SRE_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define SRE_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define SRE_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define SRE_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define SRE_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define SRE_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define SRE_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define SRE_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define SRE_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define SRE_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define SRE_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define SRE_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define SRE_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define SRE_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define SRE_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define SRE_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//UVD_MPBE0_SUVD_CGC_GATE
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define UVD_MPBE0_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define UVD_MPBE0_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define UVD_MPBE0_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define UVD_MPBE0_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define UVD_MPBE0_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define UVD_MPBE0_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define UVD_MPBE0_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define UVD_MPBE0_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define UVD_MPBE0_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define UVD_MPBE0_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define UVD_MPBE0_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define UVD_MPBE0_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define UVD_MPBE0_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define UVD_MPBE0_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define UVD_MPBE0_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define UVD_MPBE0_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define UVD_MPBE0_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define UVD_MPBE0_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//UVD_MPBE1_SUVD_CGC_GATE
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define UVD_MPBE1_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define UVD_MPBE1_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define UVD_MPBE1_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define UVD_MPBE1_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define UVD_MPBE1_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define UVD_MPBE1_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define UVD_MPBE1_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define UVD_MPBE1_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define UVD_MPBE1_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define UVD_MPBE1_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define UVD_MPBE1_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define UVD_MPBE1_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define UVD_MPBE1_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define UVD_MPBE1_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define UVD_MPBE1_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define UVD_MPBE1_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define UVD_MPBE1_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define UVD_MPBE1_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//UVD_SUVD_CGC_GATE
+#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc
+#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd
+#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe
+#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf
+#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12
+#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13
+#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14
+#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16
+#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17
+#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18
+#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19
+#define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a
+#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b
+#define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e
+#define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f
+#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L
+#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L
+#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L
+#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L
+#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L
+#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L
+#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L
+#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L
+#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L
+#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L
+#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L
+#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L
+#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L
+#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L
+#define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L
+#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L
+#define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L
+#define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L
+//AVM_SUVD_CGC_GATE2
+#define AVM_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define AVM_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define AVM_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define AVM_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define AVM_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define AVM_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define AVM_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define AVM_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define AVM_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define AVM_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define AVM_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define AVM_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define AVM_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define AVM_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define AVM_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define AVM_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define AVM_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define AVM_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//CDEFE_SUVD_CGC_GATE2
+#define CDEFE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define CDEFE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define CDEFE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define CDEFE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define CDEFE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define CDEFE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define CDEFE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define CDEFE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define CDEFE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//DBR_SUVD_CGC_GATE2
+#define DBR_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define DBR_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define DBR_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define DBR_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define DBR_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define DBR_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define DBR_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define DBR_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define DBR_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define DBR_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define DBR_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define DBR_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define DBR_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define DBR_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define DBR_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define DBR_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define DBR_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define DBR_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//ENT_SUVD_CGC_GATE2
+#define ENT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define ENT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define ENT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define ENT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define ENT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define ENT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define ENT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define ENT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define ENT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define ENT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define ENT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define ENT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define ENT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define ENT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define ENT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define ENT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define ENT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define ENT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//IME_SUVD_CGC_GATE2
+#define IME_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define IME_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define IME_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define IME_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define IME_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define IME_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define IME_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define IME_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define IME_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define IME_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define IME_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define IME_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define IME_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define IME_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define IME_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define IME_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define IME_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define IME_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//MPC1_SUVD_CGC_GATE2
+#define MPC1_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define MPC1_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define MPC1_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define MPC1_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define MPC1_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define MPC1_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define MPC1_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define MPC1_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define MPC1_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define MPC1_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define MPC1_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define MPC1_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define MPC1_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define MPC1_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define MPC1_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define MPC1_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define MPC1_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define MPC1_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define MPC1_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define MPC1_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define MPC1_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define MPC1_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define MPC1_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define MPC1_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SAOE_SUVD_CGC_GATE2
+#define SAOE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SAOE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SAOE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SAOE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SAOE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SAOE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SAOE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SAOE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SAOE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SAOE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SAOE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SAOE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SAOE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SAOE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SAOE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SAOE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SAOE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SAOE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SDB_SUVD_CGC_GATE2
+#define SDB_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SDB_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SDB_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SDB_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SDB_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SDB_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SDB_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SDB_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SDB_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SDB_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SDB_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SDB_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SDB_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SDB_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SDB_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SDB_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SDB_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SDB_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT0_NXT_SUVD_CGC_GATE2
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT1_NXT_SUVD_CGC_GATE2
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT2_NXT_SUVD_CGC_GATE2
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SIT_SUVD_CGC_GATE2
+#define SIT_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SIT_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SIT_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SIT_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SIT_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SIT_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SIT_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SIT_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SIT_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SIT_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SIT_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SIT_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SIT_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SIT_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SIT_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SIT_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SIT_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SIT_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SMPA_SUVD_CGC_GATE2
+#define SMPA_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SMPA_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SMPA_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SMPA_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SMPA_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SMPA_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SMPA_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SMPA_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SMPA_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SMPA_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SMPA_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SMPA_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SMPA_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SMPA_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SMPA_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SMPA_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SMPA_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SMPA_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SMP_SUVD_CGC_GATE2
+#define SMP_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SMP_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SMP_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SMP_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SMP_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SMP_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SMP_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SMP_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SMP_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SMP_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SMP_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SMP_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SMP_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SMP_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SMP_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SMP_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SMP_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SMP_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//SRE_SUVD_CGC_GATE2
+#define SRE_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define SRE_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define SRE_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define SRE_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define SRE_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define SRE_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define SRE_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define SRE_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define SRE_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define SRE_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define SRE_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define SRE_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define SRE_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define SRE_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define SRE_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define SRE_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define SRE_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define SRE_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//UVD_MPBE0_SUVD_CGC_GATE2
+#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define UVD_MPBE0_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define UVD_MPBE0_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define UVD_MPBE0_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define UVD_MPBE0_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define UVD_MPBE0_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define UVD_MPBE0_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define UVD_MPBE0_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define UVD_MPBE0_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define UVD_MPBE0_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define UVD_MPBE0_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define UVD_MPBE0_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+//UVD_MPBE1_SUVD_CGC_GATE2
+#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define UVD_MPBE1_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define UVD_MPBE1_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define UVD_MPBE1_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define UVD_MPBE1_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define UVD_MPBE1_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define UVD_MPBE1_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define UVD_MPBE1_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define UVD_MPBE1_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define UVD_MPBE1_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define UVD_MPBE1_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define UVD_MPBE1_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+//UVD_SUVD_CGC_GATE2
+#define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0
+#define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1
+#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2
+#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3
+#define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4
+#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC__SHIFT 0x5
+#define UVD_SUVD_CGC_GATE2__CDEFE__SHIFT 0x6
+#define UVD_SUVD_CGC_GATE2__AVM_0__SHIFT 0x7
+#define UVD_SUVD_CGC_GATE2__AVM_1__SHIFT 0x8
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN__SHIFT 0x9
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC__SHIFT 0xa
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC__SHIFT 0xb
+#define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L
+#define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L
+#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L
+#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L
+#define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L
+#define UVD_SUVD_CGC_GATE2__SRE_AV1_ENC_MASK 0x00000020L
+#define UVD_SUVD_CGC_GATE2__CDEFE_MASK 0x00000040L
+#define UVD_SUVD_CGC_GATE2__AVM_0_MASK 0x00000080L
+#define UVD_SUVD_CGC_GATE2__AVM_1_MASK 0x00000100L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_CMN_MASK 0x00000200L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_DEC_MASK 0x00000400L
+#define UVD_SUVD_CGC_GATE2__SIT_NXT_ENC_MASK 0x00000800L
+//AVM_SUVD_CGC_CTRL
+#define AVM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define AVM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define AVM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define AVM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define AVM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define AVM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define AVM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define AVM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define AVM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define AVM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define AVM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define AVM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define AVM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define AVM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define AVM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define AVM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define AVM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define AVM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define AVM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define AVM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define AVM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define AVM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define AVM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define AVM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define AVM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define AVM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define AVM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define AVM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define AVM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define AVM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define AVM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define AVM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define AVM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define AVM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define AVM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define AVM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define AVM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define AVM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define AVM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define AVM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define AVM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define AVM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define AVM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define AVM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//CDEFE_SUVD_CGC_CTRL
+#define CDEFE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define CDEFE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define CDEFE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define CDEFE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define CDEFE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define CDEFE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define CDEFE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define CDEFE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define CDEFE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define CDEFE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define CDEFE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define CDEFE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define CDEFE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define CDEFE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define CDEFE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define CDEFE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define CDEFE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define CDEFE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define CDEFE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define CDEFE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define CDEFE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define CDEFE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define CDEFE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define CDEFE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define CDEFE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define CDEFE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define CDEFE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define CDEFE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define CDEFE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define CDEFE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define CDEFE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define CDEFE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define CDEFE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define CDEFE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//DBR_SUVD_CGC_CTRL
+#define DBR_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define DBR_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define DBR_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define DBR_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define DBR_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define DBR_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define DBR_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define DBR_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define DBR_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define DBR_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define DBR_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define DBR_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define DBR_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define DBR_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define DBR_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define DBR_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define DBR_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define DBR_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define DBR_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define DBR_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define DBR_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define DBR_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define DBR_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define DBR_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define DBR_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define DBR_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define DBR_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define DBR_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define DBR_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define DBR_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define DBR_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define DBR_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define DBR_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define DBR_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define DBR_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define DBR_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define DBR_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define DBR_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define DBR_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define DBR_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define DBR_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define DBR_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define DBR_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define DBR_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//EFC_SUVD_CGC_CTRL
+#define EFC_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define EFC_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define EFC_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define EFC_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define EFC_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define EFC_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define EFC_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define EFC_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define EFC_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define EFC_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define EFC_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define EFC_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define EFC_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define EFC_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define EFC_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define EFC_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define EFC_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define EFC_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define EFC_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define EFC_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define EFC_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define EFC_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define EFC_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define EFC_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define EFC_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define EFC_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define EFC_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define EFC_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define EFC_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define EFC_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define EFC_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define EFC_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define EFC_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define EFC_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define EFC_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define EFC_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define EFC_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define EFC_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define EFC_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define EFC_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define EFC_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define EFC_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define EFC_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define EFC_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//ENT_SUVD_CGC_CTRL
+#define ENT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define ENT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define ENT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define ENT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define ENT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define ENT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define ENT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define ENT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define ENT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define ENT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define ENT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define ENT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define ENT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define ENT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define ENT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define ENT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define ENT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define ENT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define ENT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define ENT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define ENT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define ENT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define ENT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define ENT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define ENT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define ENT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define ENT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define ENT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define ENT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define ENT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define ENT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define ENT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define ENT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define ENT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define ENT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define ENT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define ENT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define ENT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define ENT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define ENT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define ENT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define ENT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define ENT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//IME_SUVD_CGC_CTRL
+#define IME_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define IME_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define IME_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define IME_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define IME_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define IME_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define IME_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define IME_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define IME_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define IME_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define IME_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define IME_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define IME_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define IME_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define IME_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define IME_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define IME_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define IME_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define IME_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define IME_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define IME_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define IME_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define IME_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define IME_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define IME_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define IME_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define IME_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define IME_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define IME_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define IME_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define IME_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define IME_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define IME_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define IME_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define IME_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define IME_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define IME_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define IME_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define IME_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define IME_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define IME_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define IME_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define IME_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define IME_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//MPC1_SUVD_CGC_CTRL
+#define MPC1_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define MPC1_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define MPC1_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define MPC1_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define MPC1_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define MPC1_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define MPC1_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define MPC1_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define MPC1_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define MPC1_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define MPC1_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define MPC1_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define MPC1_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define MPC1_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define MPC1_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define MPC1_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define MPC1_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define MPC1_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define MPC1_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define MPC1_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define MPC1_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define MPC1_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define MPC1_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define MPC1_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define MPC1_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define MPC1_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define MPC1_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define MPC1_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define MPC1_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define MPC1_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define MPC1_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define MPC1_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define MPC1_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define MPC1_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define MPC1_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define MPC1_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define MPC1_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define MPC1_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define MPC1_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define MPC1_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define MPC1_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define MPC1_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define MPC1_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define MPC1_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define MPC1_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define MPC1_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define MPC1_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define MPC1_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define MPC1_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define MPC1_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define MPC1_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define MPC1_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//PPU_SUVD_CGC_CTRL
+#define PPU_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define PPU_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define PPU_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define PPU_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define PPU_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define PPU_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define PPU_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define PPU_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define PPU_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define PPU_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define PPU_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define PPU_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define PPU_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define PPU_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define PPU_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define PPU_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define PPU_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define PPU_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define PPU_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define PPU_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define PPU_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define PPU_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define PPU_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define PPU_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define PPU_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define PPU_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define PPU_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define PPU_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define PPU_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define PPU_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define PPU_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define PPU_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define PPU_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define PPU_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define PPU_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define PPU_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define PPU_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define PPU_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define PPU_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define PPU_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define PPU_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define PPU_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define PPU_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define PPU_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SAOE_SUVD_CGC_CTRL
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SAOE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SAOE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SAOE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SAOE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SAOE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SAOE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SAOE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SAOE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SAOE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SAOE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SAOE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SAOE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SAOE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SAOE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SAOE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SAOE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SAOE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SAOE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SAOE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SAOE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SAOE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SAOE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SAOE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SAOE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SAOE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SAOE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SAOE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SAOE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SAOE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SAOE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SAOE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SAOE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SAOE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SAOE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SAOE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SAOE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SAOE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SAOE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SCM_SUVD_CGC_CTRL
+#define SCM_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SCM_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SCM_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SCM_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SCM_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SCM_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SCM_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SCM_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SCM_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SCM_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SCM_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SCM_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SCM_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SCM_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SCM_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SCM_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SCM_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SCM_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SCM_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SCM_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SCM_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SCM_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SCM_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SCM_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SCM_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SCM_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SCM_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SCM_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SCM_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SCM_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SCM_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SCM_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SCM_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SCM_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SCM_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SCM_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SCM_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SCM_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SCM_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SCM_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SCM_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SCM_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SCM_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SCM_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SDB_SUVD_CGC_CTRL
+#define SDB_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SDB_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SDB_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SDB_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SDB_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SDB_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SDB_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SDB_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SDB_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SDB_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SDB_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SDB_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SDB_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SDB_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SDB_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SDB_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SDB_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SDB_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SDB_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SDB_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SDB_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SDB_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SDB_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SDB_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SDB_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SDB_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SDB_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SDB_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SDB_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SDB_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SDB_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SDB_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SDB_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SDB_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SDB_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SDB_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SDB_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SDB_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SDB_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SDB_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SDB_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SDB_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SDB_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SDB_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT0_NXT_SUVD_CGC_CTRL
+#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT0_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT0_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT0_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT0_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT0_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT0_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT0_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT0_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT0_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT0_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT0_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT0_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT0_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT0_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT0_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT0_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT1_NXT_SUVD_CGC_CTRL
+#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT1_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT1_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT1_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT1_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT1_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT1_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT1_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT1_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT1_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT1_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT1_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT1_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT1_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT1_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT1_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT1_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT2_NXT_SUVD_CGC_CTRL
+#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT2_NXT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT2_NXT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT2_NXT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT2_NXT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT2_NXT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT2_NXT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT2_NXT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT2_NXT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT2_NXT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT2_NXT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT2_NXT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT2_NXT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT2_NXT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT2_NXT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT2_NXT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT2_NXT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SIT_SUVD_CGC_CTRL
+#define SIT_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SIT_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SIT_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SIT_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SIT_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SIT_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SIT_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SIT_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SIT_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SIT_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SIT_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SIT_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SIT_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SIT_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SIT_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SIT_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SIT_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SIT_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SIT_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SIT_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SIT_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SIT_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SIT_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SIT_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SIT_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SIT_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SIT_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SIT_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SIT_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SIT_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SIT_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SIT_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SIT_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SIT_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SIT_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SIT_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SIT_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SIT_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SIT_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SIT_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SIT_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SIT_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SIT_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SIT_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SMPA_SUVD_CGC_CTRL
+#define SMPA_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SMPA_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SMPA_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SMPA_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SMPA_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SMPA_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SMPA_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SMPA_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SMPA_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SMPA_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SMPA_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SMPA_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SMPA_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SMPA_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SMPA_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SMPA_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SMPA_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SMPA_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SMPA_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SMPA_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SMPA_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SMPA_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SMPA_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SMPA_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SMPA_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SMPA_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SMPA_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SMPA_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SMPA_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SMPA_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SMPA_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SMPA_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SMPA_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SMPA_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SMPA_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SMPA_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SMPA_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SMPA_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SMPA_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SMP_SUVD_CGC_CTRL
+#define SMP_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SMP_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SMP_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SMP_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SMP_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SMP_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SMP_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SMP_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SMP_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SMP_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SMP_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SMP_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SMP_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SMP_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SMP_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SMP_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SMP_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SMP_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SMP_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SMP_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SMP_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SMP_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SMP_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SMP_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SMP_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SMP_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SMP_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SMP_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SMP_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SMP_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SMP_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SMP_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SMP_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SMP_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SMP_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SMP_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SMP_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SMP_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SMP_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SMP_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SMP_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SMP_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SMP_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SMP_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//SRE_SUVD_CGC_CTRL
+#define SRE_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define SRE_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define SRE_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define SRE_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define SRE_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define SRE_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define SRE_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define SRE_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define SRE_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define SRE_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define SRE_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define SRE_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define SRE_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define SRE_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define SRE_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define SRE_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define SRE_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define SRE_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define SRE_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define SRE_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define SRE_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define SRE_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define SRE_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define SRE_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define SRE_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define SRE_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define SRE_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define SRE_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define SRE_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define SRE_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define SRE_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define SRE_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define SRE_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define SRE_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define SRE_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define SRE_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define SRE_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define SRE_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define SRE_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define SRE_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define SRE_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define SRE_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define SRE_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define SRE_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_MPBE0_SUVD_CGC_CTRL
+#define UVD_MPBE0_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_MPBE0_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_MPBE0_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_MPBE0_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_MPBE0_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_MPBE0_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_MPBE0_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_MPBE0_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_MPBE0_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_MPBE0_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define UVD_MPBE0_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_MPBE0_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_MPBE0_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_MPBE0_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_MPBE0_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_MPBE0_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_MPBE0_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_MPBE0_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_MPBE0_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_MPBE1_SUVD_CGC_CTRL
+#define UVD_MPBE1_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_MPBE1_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_MPBE1_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_MPBE1_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_MPBE1_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_MPBE1_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_MPBE1_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_MPBE1_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_MPBE1_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_MPBE1_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define UVD_MPBE1_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_MPBE1_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_MPBE1_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_MPBE1_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_MPBE1_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_MPBE1_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_MPBE1_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_MPBE1_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_SUVD_CGC_CTRL
+#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0
+#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1
+#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2
+#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3
+#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6
+#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7
+#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8
+#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9
+#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE__SHIFT 0xb
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE__SHIFT 0x12
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE__SHIFT 0x13
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE__SHIFT 0x14
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE__SHIFT 0x15
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE__SHIFT 0x16
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE__SHIFT 0x1e
+#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L
+#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L
+#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L
+#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L
+#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L
+#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L
+#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L
+#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L
+#define UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L
+#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L
+#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L
+#define UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK 0x00000800L
+#define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L
+#define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L
+#define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L
+#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L
+#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L
+#define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L
+#define UVD_SUVD_CGC_CTRL__AVM_0_MODE_MASK 0x00040000L
+#define UVD_SUVD_CGC_CTRL__AVM_1_MODE_MASK 0x00080000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_CMN_MODE_MASK 0x00100000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_DEC_MODE_MASK 0x00200000L
+#define UVD_SUVD_CGC_CTRL__SIT_NXT_ENC_MODE_MASK 0x00400000L
+#define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_CTRL__CDEFE_MODE_MASK 0x40000000L
+//UVD_CGC_CTRL3
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY__SHIFT 0x0
+#define UVD_CGC_CTRL3__LCM0_MODE__SHIFT 0xb
+#define UVD_CGC_CTRL3__LCM1_MODE__SHIFT 0xc
+#define UVD_CGC_CTRL3__MIF_MODE__SHIFT 0xd
+#define UVD_CGC_CTRL3__VREG_MODE__SHIFT 0xe
+#define UVD_CGC_CTRL3__PE_MODE__SHIFT 0xf
+#define UVD_CGC_CTRL3__PPU_MODE__SHIFT 0x10
+#define UVD_CGC_CTRL3__CGC_CLK_OFF_DELAY_MASK 0x000000FFL
+#define UVD_CGC_CTRL3__LCM0_MODE_MASK 0x00000800L
+#define UVD_CGC_CTRL3__LCM1_MODE_MASK 0x00001000L
+#define UVD_CGC_CTRL3__MIF_MODE_MASK 0x00002000L
+#define UVD_CGC_CTRL3__VREG_MODE_MASK 0x00004000L
+#define UVD_CGC_CTRL3__PE_MODE_MASK 0x00008000L
+#define UVD_CGC_CTRL3__PPU_MODE_MASK 0x00010000L
+//UVD_GPCOM_VCPU_DATA0
+#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_VCPU_DATA1
+#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_CMD
+#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L
+//UVD_GPCOM_SYS_DATA0
+#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_GPCOM_SYS_DATA1
+#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0
+#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_VCPU_INT_EN
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa
+#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_VCPU_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10
+#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11
+#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a
+#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f
+#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L
+#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L
+#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L
+#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L
+#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L
+#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L
+#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_VCPU_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_VCPU_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L
+#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L
+#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L
+#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_VCPU_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L
+#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L
+#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L
+//UVD_VCPU_INT_STATUS
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT__SHIFT 0x3
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT__SHIFT 0x4
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT__SHIFT 0x5
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT__SHIFT 0x7
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT__SHIFT 0x9
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT__SHIFT 0xa
+#define UVD_VCPU_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_VCPU_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_VCPU_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT__SHIFT 0x10
+#define UVD_VCPU_INT_STATUS__JOB_START_INT__SHIFT 0x11
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT__SHIFT 0x12
+#define UVD_VCPU_INT_STATUS__GPCOM_INT__SHIFT 0x14
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_VCPU_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_VCPU_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_VCPU_INT_STATUS__AVM_INT__SHIFT 0x1a
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_VCPU_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT__SHIFT 0x1e
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT__SHIFT 0x1f
+#define UVD_VCPU_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_VCPU_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_VCPU_INT_STATUS__NJ_PF_RPT_INT_MASK 0x00000008L
+#define UVD_VCPU_INT_STATUS__SW_RB1_INT_MASK 0x00000010L
+#define UVD_VCPU_INT_STATUS__SW_RB2_INT_MASK 0x00000020L
+#define UVD_VCPU_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_VCPU_INT_STATUS__SW_RB3_INT_MASK 0x00000080L
+#define UVD_VCPU_INT_STATUS__SW_RB4_INT_MASK 0x00000200L
+#define UVD_VCPU_INT_STATUS__SW_RB5_INT_MASK 0x00000400L
+#define UVD_VCPU_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_VCPU_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_VCPU_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_VCPU_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_VCPU_INT_STATUS__RPTR_WR_INT_MASK 0x00010000L
+#define UVD_VCPU_INT_STATUS__JOB_START_INT_MASK 0x00020000L
+#define UVD_VCPU_INT_STATUS__NJ_PF_INT_MASK 0x00040000L
+#define UVD_VCPU_INT_STATUS__GPCOM_INT_MASK 0x00100000L
+#define UVD_VCPU_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_VCPU_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_VCPU_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_VCPU_INT_STATUS__AVM_INT_MASK 0x04000000L
+#define UVD_VCPU_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_VCPU_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_VCPU_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_REQ_INT_MASK 0x40000000L
+#define UVD_VCPU_INT_STATUS__DRV_FW_ACK_INT_MASK 0x80000000L
+//UVD_VCPU_INT_ACK
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa
+#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_VCPU_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10
+#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f
+#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L
+#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L
+#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L
+#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L
+#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L
+#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L
+#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_VCPU_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_VCPU_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L
+#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L
+#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L
+#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L
+#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L
+#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L
+//UVD_VCPU_INT_ROUTE
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2
+#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L
+#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L
+#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L
+//UVD_DRV_FW_MSG
+#define UVD_DRV_FW_MSG__MSG__SHIFT 0x0
+#define UVD_DRV_FW_MSG__MSG_MASK 0xFFFFFFFFL
+//UVD_FW_DRV_MSG_ACK
+#define UVD_FW_DRV_MSG_ACK__ACK__SHIFT 0x0
+#define UVD_FW_DRV_MSG_ACK__ACK_MASK 0x00000001L
+//UVD_SUVD_INT_EN
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN__SHIFT 0xc
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN__SHIFT 0x11
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN__SHIFT 0x12
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN__SHIFT 0x17
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN__SHIFT 0x18
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN__SHIFT 0x1d
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN__SHIFT 0x1e
+#define UVD_SUVD_INT_EN__SRE_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN__SRE_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN__SIT_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN__SIT_ERR_INT_EN_MASK 0x00000800L
+#define UVD_SUVD_INT_EN__SMP_FUNC_INT_EN_MASK 0x0001F000L
+#define UVD_SUVD_INT_EN__SMP_ERR_INT_EN_MASK 0x00020000L
+#define UVD_SUVD_INT_EN__SCM_FUNC_INT_EN_MASK 0x007C0000L
+#define UVD_SUVD_INT_EN__SCM_ERR_INT_EN_MASK 0x00800000L
+#define UVD_SUVD_INT_EN__SDB_FUNC_INT_EN_MASK 0x1F000000L
+#define UVD_SUVD_INT_EN__SDB_ERR_INT_EN_MASK 0x20000000L
+#define UVD_SUVD_INT_EN__FBC_ERR_INT_EN_MASK 0x40000000L
+//UVD_SUVD_INT_STATUS
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT__SHIFT 0xc
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT__SHIFT 0x11
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT__SHIFT 0x12
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT__SHIFT 0x17
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT__SHIFT 0x18
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT__SHIFT 0x1d
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT__SHIFT 0x1e
+#define UVD_SUVD_INT_STATUS__SRE_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS__SRE_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS__SIT_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS__SIT_ERR_INT_MASK 0x00000800L
+#define UVD_SUVD_INT_STATUS__SMP_FUNC_INT_MASK 0x0001F000L
+#define UVD_SUVD_INT_STATUS__SMP_ERR_INT_MASK 0x00020000L
+#define UVD_SUVD_INT_STATUS__SCM_FUNC_INT_MASK 0x007C0000L
+#define UVD_SUVD_INT_STATUS__SCM_ERR_INT_MASK 0x00800000L
+#define UVD_SUVD_INT_STATUS__SDB_FUNC_INT_MASK 0x1F000000L
+#define UVD_SUVD_INT_STATUS__SDB_ERR_INT_MASK 0x20000000L
+#define UVD_SUVD_INT_STATUS__FBC_ERR_INT_MASK 0x40000000L
+//UVD_SUVD_INT_ACK
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK__SHIFT 0xc
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK__SHIFT 0x11
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK__SHIFT 0x12
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK__SHIFT 0x17
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK__SHIFT 0x18
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK__SHIFT 0x1d
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK__SHIFT 0x1e
+#define UVD_SUVD_INT_ACK__SRE_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK__SRE_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK__SIT_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK__SIT_ERR_INT_ACK_MASK 0x00000800L
+#define UVD_SUVD_INT_ACK__SMP_FUNC_INT_ACK_MASK 0x0001F000L
+#define UVD_SUVD_INT_ACK__SMP_ERR_INT_ACK_MASK 0x00020000L
+#define UVD_SUVD_INT_ACK__SCM_FUNC_INT_ACK_MASK 0x007C0000L
+#define UVD_SUVD_INT_ACK__SCM_ERR_INT_ACK_MASK 0x00800000L
+#define UVD_SUVD_INT_ACK__SDB_FUNC_INT_ACK_MASK 0x1F000000L
+#define UVD_SUVD_INT_ACK__SDB_ERR_INT_ACK_MASK 0x20000000L
+#define UVD_SUVD_INT_ACK__FBC_ERR_INT_ACK_MASK 0x40000000L
+//UVD_ENC_VCPU_INT_EN
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_STATUS
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR_INT_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR2_INT_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_STATUS__DCE_UVD_SCAN_IN_BUFMGR3_INT_MASK 0x00000004L
+//UVD_ENC_VCPU_INT_ACK
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L
+#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L
+//UVD_MASTINT_EN
+#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1
+#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2
+#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L
+#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L
+#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x00FFFFF0L
+//UVD_SYS_INT_EN
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2
+#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6
+#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb
+#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN__SHIFT 0xd
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN__SHIFT 0xe
+#define UVD_SYS_INT_EN__SUVD_EN__SHIFT 0xf
+#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17
+#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18
+#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19
+#define UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN__SHIFT 0x1a
+#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b
+#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c
+#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d
+#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f
+#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L
+#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L
+#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L
+#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L
+#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L
+#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_LEN_EN_MASK 0x00002000L
+#define UVD_SYS_INT_EN__LMI_AXI_UNSUPPORTED_ADR_ALIGN_EN_MASK 0x00004000L
+#define UVD_SYS_INT_EN__SUVD_EN_MASK 0x00008000L
+#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L
+#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L
+#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L
+#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L
+#define UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK 0x04000000L
+#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L
+#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L
+#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L
+#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L
+//UVD_SYS_INT_STATUS
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2
+#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6
+#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb
+#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT__SHIFT 0xd
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0xe
+#define UVD_SYS_INT_STATUS__SUVD_INT__SHIFT 0xf
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10
+#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17
+#define UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18
+#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b
+#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d
+#define UVD_SYS_INT_STATUS__RASCNTL_VCPU_VCODEC_INT__SHIFT 0x1e
+#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f
+#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L
+#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L
+#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L
+#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L
+#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L
+#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_LEN_INT_MASK 0x00002000L
+#define UVD_SYS_INT_STATUS__LMI_AXI_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00004000L
+#define UVD_SYS_INT_STATUS__SUVD_INT_MASK 0x00008000L
+#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L
+#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L
+#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L
+#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L
+#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L
+#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L
+#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L
+#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L
+#define UVD_SYS_INT_STATUS__RASCNTL_VCPU_VCODEC_INT_MASK 0x40000000L
+#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L
+//UVD_SYS_INT_ACK
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2
+#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6
+#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb
+#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK__SHIFT 0xd
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK__SHIFT 0xe
+#define UVD_SYS_INT_ACK__SUVD_ACK__SHIFT 0xf
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17
+#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18
+#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d
+#define UVD_SYS_INT_ACK__RASCNTL_VCPU_VCODEC_ACK__SHIFT 0x1e
+#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f
+#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L
+#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L
+#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L
+#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L
+#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L
+#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_LEN_ACK_MASK 0x00002000L
+#define UVD_SYS_INT_ACK__LMI_AXI_UNSUPPORTED_ADR_ALIGN_ACK_MASK 0x00004000L
+#define UVD_SYS_INT_ACK__SUVD_ACK_MASK 0x00008000L
+#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L
+#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L
+#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L
+#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L
+#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L
+#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L
+#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L
+#define UVD_SYS_INT_ACK__RASCNTL_VCPU_VCODEC_ACK_MASK 0x40000000L
+#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L
+//UVD_JOB_DONE
+#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0
+#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L
+//UVD_CBUF_ID
+#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0
+#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID
+#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0
+#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL
+//UVD_CONTEXT_ID2
+#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
+#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL
+//UVD_NO_OP
+#define UVD_NO_OP__NO_OP__SHIFT 0x0
+#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL
+//UVD_RB_BASE_LO
+#define UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI
+#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE
+#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO2
+#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI2
+#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE2
+#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO3
+#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI3
+#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE3
+#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L
+//UVD_RB_BASE_LO4
+#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6
+#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_RB_BASE_HI4
+#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0
+#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_RB_SIZE4
+#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4
+#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L
+//UVD_OUT_RB_BASE_LO
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_OUT_RB_BASE_HI
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_OUT_RB_SIZE
+#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_IOV_ACTIVE_FCN_ID
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define UVD_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000003FL
+#define UVD_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//UVD_IOV_MAILBOX
+#define UVD_IOV_MAILBOX__MAILBOX__SHIFT 0x0
+#define UVD_IOV_MAILBOX__MAILBOX_MASK 0xFFFFFFFFL
+//UVD_IOV_MAILBOX_RESP
+#define UVD_IOV_MAILBOX_RESP__RESP__SHIFT 0x0
+#define UVD_IOV_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+//UVD_RB_ARB_CTRL
+#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0
+#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1
+#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2
+#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3
+#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4
+#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN__SHIFT 0x9
+#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L
+#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L
+#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L
+#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L
+#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L
+#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 0x00000020L
+#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L
+#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L
+#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L
+#define UVD_RB_ARB_CTRL__UVD_RB_DBG_EN_MASK 0x00000200L
+//UVD_CTX_INDEX
+#define UVD_CTX_INDEX__INDEX__SHIFT 0x0
+#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL
+//UVD_CTX_DATA
+#define UVD_CTX_DATA__DATA__SHIFT 0x0
+#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_CXW_WR
+#define UVD_CXW_WR__DAT__SHIFT 0x0
+#define UVD_CXW_WR__STAT__SHIFT 0x1f
+#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL
+#define UVD_CXW_WR__STAT_MASK 0x80000000L
+//UVD_CXW_WR_INT_ID
+#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL
+//UVD_CXW_WR_INT_CTX_ID
+#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0
+#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL
+//UVD_CXW_INT_ID
+#define UVD_CXW_INT_ID__ID__SHIFT 0x0
+#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL
+//UVD_MPEG2_ERROR
+#define UVD_MPEG2_ERROR__STATUS__SHIFT 0x0
+#define UVD_MPEG2_ERROR__STATUS_MASK 0xFFFFFFFFL
+//UVD_YBASE
+#define UVD_YBASE__DUM__SHIFT 0x0
+#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_UVBASE
+#define UVD_UVBASE__DUM__SHIFT 0x0
+#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL
+//UVD_PITCH
+#define UVD_PITCH__DUM__SHIFT 0x0
+#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL
+//UVD_WIDTH
+#define UVD_WIDTH__DUM__SHIFT 0x0
+#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL
+//UVD_HEIGHT
+#define UVD_HEIGHT__DUM__SHIFT 0x0
+#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL
+//UVD_PICCOUNT
+#define UVD_PICCOUNT__DUM__SHIFT 0x0
+#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL
+//UVD_MPRD_INITIAL_XY
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X__SHIFT 0x0
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y__SHIFT 0x10
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_X_MASK 0x00000FFFL
+#define UVD_MPRD_INITIAL_XY__MPRD_SCREEN_Y_MASK 0x0FFF0000L
+//UVD_MPEG2_CTRL
+#define UVD_MPEG2_CTRL__EN__SHIFT 0x0
+#define UVD_MPEG2_CTRL__TRICK_MODE__SHIFT 0x1
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB__SHIFT 0x10
+#define UVD_MPEG2_CTRL__EN_MASK 0x00000001L
+#define UVD_MPEG2_CTRL__TRICK_MODE_MASK 0x00000002L
+#define UVD_MPEG2_CTRL__NUM_MB_PER_JOB_MASK 0xFFFF0000L
+//UVD_MB_CTL_BUF_BASE
+#define UVD_MB_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_MB_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_PIC_CTL_BUF_BASE
+#define UVD_PIC_CTL_BUF_BASE__BASE__SHIFT 0x0
+#define UVD_PIC_CTL_BUF_BASE__BASE_MASK 0xFFFFFFFFL
+//UVD_DXVA_BUF_SIZE
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE__SHIFT 0x0
+#define UVD_DXVA_BUF_SIZE__MB_SIZE__SHIFT 0x10
+#define UVD_DXVA_BUF_SIZE__PIC_SIZE_MASK 0x0000FFFFL
+#define UVD_DXVA_BUF_SIZE__MB_SIZE_MASK 0xFFFF0000L
+//UVD_SCRATCH_NP
+#define UVD_SCRATCH_NP__DATA__SHIFT 0x0
+#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL
+//UVD_CLK_SWT_HANDSHAKE
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE__SHIFT 0x0
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT__SHIFT 0x8
+#define UVD_CLK_SWT_HANDSHAKE__CLK_SWT_TYPE_MASK 0x00000003L
+#define UVD_CLK_SWT_HANDSHAKE__CLK_DOMAIN_SWT_MASK 0x00000300L
+//UVD_GP_SCRATCH0
+#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH1
+#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH2
+#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH3
+#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH4
+#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH5
+#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH6
+#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH7
+#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH8
+#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH9
+#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH10
+#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH11
+#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH12
+#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH13
+#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH14
+#define UVD_GP_SCRATCH14__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH15
+#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH16
+#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH17
+#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH18
+#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH19
+#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH20
+#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH21
+#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH22
+#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL
+//UVD_GP_SCRATCH23
+#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0
+#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_BASE_LO
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6
+#define UVD_AUDIO_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L
+//UVD_AUDIO_RB_BASE_HI
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define UVD_AUDIO_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL
+//UVD_AUDIO_RB_SIZE
+#define UVD_AUDIO_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_AUDIO_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L
+//UVD_VCPU_INT_STATUS2
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT__SHIFT 0x0
+#define UVD_VCPU_INT_STATUS2__RASCNTL_VCPU_VCODEC_INT__SHIFT 0x15
+#define UVD_VCPU_INT_STATUS2__SW_RB6_INT_MASK 0x00000001L
+#define UVD_VCPU_INT_STATUS2__RASCNTL_VCPU_VCODEC_INT_MASK 0x00200000L
+//UVD_VCPU_INT_ACK2
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK__SHIFT 0x0
+#define UVD_VCPU_INT_ACK2__RASCNTL_VCPU_VCODEC_ACK__SHIFT 0x16
+#define UVD_VCPU_INT_ACK2__SW_RB6_INT_ACK_MASK 0x00000001L
+#define UVD_VCPU_INT_ACK2__RASCNTL_VCPU_VCODEC_ACK_MASK 0x00400000L
+//UVD_VCPU_INT_EN2
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN__SHIFT 0x0
+#define UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN__SHIFT 0x1
+#define UVD_VCPU_INT_EN2__SW_RB6_INT_EN_MASK 0x00000001L
+#define UVD_VCPU_INT_EN2__RASCNTL_VCPU_VCODEC_EN_MASK 0x00000002L
+//UVD_SUVD_CGC_STATUS2
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS2__SRE_AV1_ENC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS2__CDEFE_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS2__SIT0_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS2__SIT1_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS2__SIT2_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS2__FBC_PCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS2__FBC_CCLK_MASK 0x20000000L
+//UVD_SUVD_INT_STATUS2
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb
+#define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL
+#define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L
+#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L
+//UVD_SUVD_INT_EN2
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb
+#define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL
+#define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L
+#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L
+#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L
+//UVD_SUVD_INT_ACK2
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb
+#define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL
+#define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L
+#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L
+//UVD_STATUS
+#define UVD_STATUS__RBC_BUSY__SHIFT 0x0
+#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1
+#define UVD_STATUS__FILL_0__SHIFT 0x8
+#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10
+#define UVD_STATUS__DRM_BUSY__SHIFT 0x11
+#define UVD_STATUS__FILL_1__SHIFT 0x12
+#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f
+#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L
+#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL
+#define UVD_STATUS__FILL_0_MASK 0x0000FF00L
+#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L
+#define UVD_STATUS__DRM_BUSY_MASK 0x00020000L
+#define UVD_STATUS__FILL_1_MASK 0x7FFC0000L
+#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L
+//UVD_ENC_PIPE_BUSY
+#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY__SHIFT 0xc
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY__SHIFT 0xd
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY__SHIFT 0xe
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY__SHIFT 0xf
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY__SHIFT 0x1f
+#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L
+#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L
+#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L
+#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L
+#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L
+#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L
+#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L
+#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L
+#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L
+#define UVD_ENC_PIPE_BUSY__EFC_BUSY_MASK 0x00001000L
+#define UVD_ENC_PIPE_BUSY__MDM_PPU_BUSY_MASK 0x00002000L
+#define UVD_ENC_PIPE_BUSY__MIF_AUTODMA_BUSY_MASK 0x00004000L
+#define UVD_ENC_PIPE_BUSY__CDEFE_BUSY_MASK 0x00008000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L
+#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L
+#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L
+#define UVD_ENC_PIPE_BUSY__SAOE_BUSY_MASK 0x80000000L
+//UVD_FW_POWER_STATUS
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF__SHIFT 0x0
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF__SHIFT 0x1
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF__SHIFT 0x2
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF__SHIFT 0x3
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF__SHIFT 0x4
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF__SHIFT 0x5
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF__SHIFT 0x6
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF__SHIFT 0x7
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF__SHIFT 0x8
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF__SHIFT 0x9
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF__SHIFT 0xa
+#define UVD_FW_POWER_STATUS__UVDF_PWR_OFF_MASK 0x00000001L
+#define UVD_FW_POWER_STATUS__UVDTC_PWR_OFF_MASK 0x00000002L
+#define UVD_FW_POWER_STATUS__UVDB_PWR_OFF_MASK 0x00000004L
+#define UVD_FW_POWER_STATUS__UVDTA_PWR_OFF_MASK 0x00000008L
+#define UVD_FW_POWER_STATUS__UVDTD_PWR_OFF_MASK 0x00000010L
+#define UVD_FW_POWER_STATUS__UVDTE_PWR_OFF_MASK 0x00000020L
+#define UVD_FW_POWER_STATUS__UVDE_PWR_OFF_MASK 0x00000040L
+#define UVD_FW_POWER_STATUS__UVDAB_PWR_OFF_MASK 0x00000080L
+#define UVD_FW_POWER_STATUS__UVDTB_PWR_OFF_MASK 0x00000100L
+#define UVD_FW_POWER_STATUS__UVDNA_PWR_OFF_MASK 0x00000200L
+#define UVD_FW_POWER_STATUS__UVDNB_PWR_OFF_MASK 0x00000400L
+//UVD_CNTL
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11
+#define UVD_CNTL__SUVD_EN__SHIFT 0x13
+#define UVD_CNTL__CABAC_MB_ACC__SHIFT 0x1c
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS__SHIFT 0x1f
+#define UVD_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L
+#define UVD_CNTL__SUVD_EN_MASK 0x00080000L
+#define UVD_CNTL__CABAC_MB_ACC_MASK 0x10000000L
+#define UVD_CNTL__LRBBM_SAFE_SYNC_DIS_MASK 0x80000000L
+//UVD_SOFT_RESET
+#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4
+#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6
+#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7
+#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8
+#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9
+#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd
+#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe
+#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf
+#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f
+#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L
+#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L
+#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L
+#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L
+#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L
+#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L
+#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L
+#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L
+#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L
+#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L
+#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L
+#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L
+#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L
+#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L
+#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L
+#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L
+#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L
+#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L
+#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L
+#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L
+#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L
+#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L
+#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L
+#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L
+#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L
+#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L
+#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L
+#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L
+//UVD_SOFT_RESET2
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET__SHIFT 0x1
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L
+#define UVD_SOFT_RESET2__PPU_SOFT_RESET_MASK 0x00000002L
+#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L
+#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_MMSCH_SOFT_RESET
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f
+#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L
+#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L
+//UVD_WIG_CTRL
+#define UVD_WIG_CTRL__AVM_SOFT_RESET__SHIFT 0x0
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET__SHIFT 0x1
+#define UVD_WIG_CTRL__WIG_SOFT_RESET__SHIFT 0x2
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON__SHIFT 0x3
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON__SHIFT 0x4
+#define UVD_WIG_CTRL__AVM_SOFT_RESET_MASK 0x00000001L
+#define UVD_WIG_CTRL__ACAP_SOFT_RESET_MASK 0x00000002L
+#define UVD_WIG_CTRL__WIG_SOFT_RESET_MASK 0x00000004L
+#define UVD_WIG_CTRL__WIG_REGCLK_FORCE_ON_MASK 0x00000008L
+#define UVD_WIG_CTRL__AVM_REGCLK_FORCE_ON_MASK 0x00000010L
+//UVD_CGC_STATUS
+#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0
+#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1
+#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2
+#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3
+#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4
+#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5
+#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6
+#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7
+#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8
+#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9
+#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa
+#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb
+#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc
+#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd
+#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe
+#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf
+#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10
+#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11
+#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12
+#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13
+#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14
+#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15
+#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16
+#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17
+#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18
+#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19
+#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a
+#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b
+#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d
+#define UVD_CGC_STATUS__LRBBM_DCLK__SHIFT 0x1e
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f
+#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L
+#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L
+#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L
+#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L
+#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L
+#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L
+#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L
+#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L
+#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L
+#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L
+#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L
+#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L
+#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L
+#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L
+#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L
+#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L
+#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L
+#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L
+#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L
+#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L
+#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L
+#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L
+#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L
+#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L
+#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L
+#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L
+#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L
+#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L
+#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L
+#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L
+#define UVD_CGC_STATUS__LRBBM_DCLK_MASK 0x40000000L
+#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L
+//UVD_CGC_UDEC_STATUS
+#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0
+#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1
+#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2
+#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3
+#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4
+#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5
+#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6
+#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7
+#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8
+#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9
+#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa
+#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb
+#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc
+#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd
+#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe
+#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L
+#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L
+#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L
+#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L
+#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L
+#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L
+#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L
+#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L
+#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L
+#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L
+#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L
+#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 0x00000800L
+#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L
+#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L
+#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L
+//UVD_SUVD_CGC_STATUS
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe
+#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10
+#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f
+#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L
+#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L
+#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L
+#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L
+#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L
+#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L
+#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L
+#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L
+#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L
+#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L
+#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L
+#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L
+#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L
+#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L
+#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L
+#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L
+#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L
+#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L
+#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L
+#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L
+#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L
+#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L
+#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L
+#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L
+#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L
+#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L
+#define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L
+#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L
+#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L
+//UVD_GPCOM_VCPU_CMD
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0
+#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f
+#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L
+#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL
+#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L
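(Aside, not part of the generated header or of this patch: the SHIFT/MASK pairs above are consumed with the usual read-modify-write pattern, field = (reg & MASK) >> SHIFT. Below is a minimal illustrative sketch, assuming this header is included; the fake_mmio storage and the rreg()/wreg() helpers are stand-ins for the driver's real register accessors and are not amdgpu APIs.)

/* Minimal sketch: program the CMD field and assert CMD_SEND in
 * UVD_GPCOM_VCPU_CMD using the masks defined above.  fake_mmio and the
 * rreg()/wreg() helpers are illustrative stand-ins only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio;                        /* stand-in for the MMIO register */
static uint32_t rreg(void)       { return fake_mmio; }
static void     wreg(uint32_t v) { fake_mmio = v;     }

int main(void)
{
	uint32_t tmp = rreg();

	tmp &= ~UVD_GPCOM_VCPU_CMD__CMD_MASK;                 /* clear bits [30:1]      */
	tmp |= (0x5u << UVD_GPCOM_VCPU_CMD__CMD__SHIFT) &
	       UVD_GPCOM_VCPU_CMD__CMD_MASK;                  /* place a command value  */
	tmp |= UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK;             /* kick bit 0 (CMD_SEND)  */

	wreg(tmp);
	printf("UVD_GPCOM_VCPU_CMD = 0x%08x\n", fake_mmio);
	return 0;
}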
+
+
+// addressBlock: aid_uvd0_ecpudec
+//UVD_VCPU_CACHE_OFFSET0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET1
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE1
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET2
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE2
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET3
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE3
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET4
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE4
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET5
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE5
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET6
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE6
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET7
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE7
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_OFFSET8
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0
+#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 0x001FFFFFL
+//UVD_VCPU_CACHE_SIZE8
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0
+#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL
+//UVD_VCPU_NONCACHE_OFFSET1
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL
+//UVD_VCPU_NONCACHE_SIZE1
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0
+#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL
+//UVD_VCPU_CNTL
+#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x4
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7
+#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8
+#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9
+#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa
+#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb
+#define UVD_VCPU_CNTL__DBG_MUX__SHIFT 0xd
+#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10
+#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c
+#define UVD_VCPU_CNTL__RUNSTALL__SHIFT 0x1d
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST__SHIFT 0x1e
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST__SHIFT 0x1f
+#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL
+#define UVD_VCPU_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000010L
+#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L
+#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L
+#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L
+#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L
+#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L
+#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L
+#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L
+#define UVD_VCPU_CNTL__DBG_MUX_MASK 0x0000E000L
+#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L
+#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L
+#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L
+#define UVD_VCPU_CNTL__RUNSTALL_MASK 0x20000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_DRST_MASK 0x40000000L
+#define UVD_VCPU_CNTL__SRE_CMDIF_VRST_MASK 0x80000000L
+//UVD_VCPU_PRID
+#define UVD_VCPU_PRID__PRID__SHIFT 0x0
+#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL
+//UVD_VCPU_TRCE
+#define UVD_VCPU_TRCE__PC__SHIFT 0x0
+#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL
+//UVD_VCPU_TRCE_RD
+#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0
+#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL
+//UVD_VCPU_IND_INDEX
+#define UVD_VCPU_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_VCPU_IND_INDEX__INDEX_MASK 0x000001FFL
+//UVD_VCPU_IND_DATA
+#define UVD_VCPU_IND_DATA__DATA__SHIFT 0x0
+#define UVD_VCPU_IND_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_mpcdec
+//UVD_MP_SWAP_CNTL
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x0
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x2
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x4
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x6
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x8
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0xa
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0xc
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0xe
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x10
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x12
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x14
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x16
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x18
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x1a
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x1c
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x1e
+#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L
+#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000CL
+#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L
+#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000C0L
+#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L
+#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000C00L
+#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L
+#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000C000L
+#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L
+#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000C0000L
+#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L
+#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00C00000L
+#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L
+#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0C000000L
+#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L
+#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xC0000000L
+//UVD_MP_SWAP_CNTL2
+#define UVD_MP_SWAP_CNTL2__MP_REF16_MC_SWAP__SHIFT 0x0
+#define UVD_MP_SWAP_CNTL2__MP_REF16_MC_SWAP_MASK 0x00000003L
+//UVD_MPC_LUMA_SRCH
+#define UVD_MPC_LUMA_SRCH__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_SRCH__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_LUMA_HIT
+#define UVD_MPC_LUMA_HIT__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_HIT__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_LUMA_HITPEND
+#define UVD_MPC_LUMA_HITPEND__CNTR__SHIFT 0x0
+#define UVD_MPC_LUMA_HITPEND__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_SRCH
+#define UVD_MPC_CHROMA_SRCH__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_SRCH__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_HIT
+#define UVD_MPC_CHROMA_HIT__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_HIT__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CHROMA_HITPEND
+#define UVD_MPC_CHROMA_HITPEND__CNTR__SHIFT 0x0
+#define UVD_MPC_CHROMA_HITPEND__CNTR_MASK 0xFFFFFFFFL
+//UVD_MPC_CNTL
+#define UVD_MPC_CNTL__BLK_RST__SHIFT 0x0
+#define UVD_MPC_CNTL__REG_MPC1_PERF_SELECT__SHIFT 0x1
+#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3
+#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x6
+#define UVD_MPC_CNTL__REG_MPC_CNTL_BACKWARD_COMPATIBILITY__SHIFT 0x7
+#define UVD_MPC_CNTL__DBG_MUX__SHIFT 0x8
+#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x10
+#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x12
+#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP__SHIFT 0x13
+#define UVD_MPC_CNTL__TEST_MODE_EN__SHIFT 0x14
+#define UVD_MPC_CNTL__BLK_RST_MASK 0x00000001L
+#define UVD_MPC_CNTL__REG_MPC1_PERF_SELECT_MASK 0x00000002L
+#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L
+#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L
+#define UVD_MPC_CNTL__REG_MPC_CNTL_BACKWARD_COMPATIBILITY_MASK 0x00000080L
+#define UVD_MPC_CNTL__DBG_MUX_MASK 0x00000F00L
+#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L
+#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L
+#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP_MASK 0x00080000L
+#define UVD_MPC_CNTL__TEST_MODE_EN_MASK 0x00300000L
+//UVD_MPC_PITCH
+#define UVD_MPC_PITCH__LUMA_PITCH__SHIFT 0x0
+#define UVD_MPC_PITCH__LUMA_PITCH_MASK 0x000007FFL
+//UVD_MPC_SET_MUXA0
+#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0
+#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6
+#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0xc
+#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x12
+#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x18
+#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003F000L
+#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00FC0000L
+#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3F000000L
+//UVD_MPC_SET_MUXA1
+#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x0
+#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x6
+#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0xc
+#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003F000L
+//UVD_MPC_SET_MUXB0
+#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x0
+#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x6
+#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0xc
+#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x12
+#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x18
+#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003F000L
+#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00FC0000L
+#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3F000000L
+//UVD_MPC_SET_MUXB1
+#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x0
+#define UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x6
+#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0xc
+#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003FL
+#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000FC0L
+#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003F000L
+//UVD_MPC_SET_MUX
+#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x0
+#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x3
+#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x6
+#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L
+#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L
+#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001C0L
+//UVD_MPC_SET_ALU
+#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x0
+#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x4
+#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L
+#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000FF0L
+//UVD_MPC_PERF0
+#define UVD_MPC_PERF0__MAX_LAT__SHIFT 0x0
+#define UVD_MPC_PERF0__MAX_LAT_MASK 0x000003FFL
+//UVD_MPC_PERF1
+#define UVD_MPC_PERF1__AVE_LAT__SHIFT 0x0
+#define UVD_MPC_PERF1__AVE_LAT_MASK 0x000003FFL
+//UVD_MPC_IND_INDEX
+#define UVD_MPC_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_MPC_IND_INDEX__INDEX_MASK 0x000001FFL
+//UVD_MPC_IND_DATA
+#define UVD_MPC_IND_DATA__DATA__SHIFT 0x0
+#define UVD_MPC_IND_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_rbcdec
+//UVD_RBC_IB_SIZE
+#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_RBC_IB_SIZE_UPDATE
+#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_RBC_RB_CNTL
+#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x10
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x14
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x18
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1c
+#define UVD_RBC_RB_CNTL__BLK_RST__SHIFT 0x1d
+#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001FL
+#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001F00L
+#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L
+#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L
+#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L
+#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L
+#define UVD_RBC_RB_CNTL__BLK_RST_MASK 0x20000000L
+//UVD_RBC_RB_RPTR_ADDR
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x0
+#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFFL
+//UVD_RBC_VCPU_ACCESS
+#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC__SHIFT 0x0
+#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC_MASK 0x00000001L
+//UVD_FW_SEMAPHORE_CNTL
+#define UVD_FW_SEMAPHORE_CNTL__START__SHIFT 0x0
+#define UVD_FW_SEMAPHORE_CNTL__BUSY__SHIFT 0x8
+#define UVD_FW_SEMAPHORE_CNTL__PASS__SHIFT 0x9
+#define UVD_FW_SEMAPHORE_CNTL__START_MASK 0x00000001L
+#define UVD_FW_SEMAPHORE_CNTL__BUSY_MASK 0x00000100L
+#define UVD_FW_SEMAPHORE_CNTL__PASS_MASK 0x00000200L
+//UVD_RBC_READ_REQ_URGENT_CNTL
+#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_RBC_RB_WPTR_CNTL
+#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x0
+#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER_MASK 0x00007FFFL
+//UVD_RBC_WPTR_STATUS
+#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE__SHIFT 0x4
+#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE_MASK 0x007FFFF0L
+//UVD_RBC_WPTR_POLL_CNTL
+#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ__SHIFT 0x0
+#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ_MASK 0x0000FFFFL
+#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//UVD_RBC_WPTR_POLL_ADDR
+#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR__SHIFT 0x2
+#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR_MASK 0xFFFFFFFCL
+//UVD_SEMA_CMD
+#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x0
+#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x4
+#define UVD_SEMA_CMD__MODE__SHIFT 0x6
+#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x7
+#define UVD_SEMA_CMD__VMID__SHIFT 0x8
+#define UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000FL
+#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L
+#define UVD_SEMA_CMD__MODE_MASK 0x00000040L
+#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L
+#define UVD_SEMA_CMD__VMID_MASK 0x00000F00L
+//UVD_SEMA_ADDR_LOW
+#define UVD_SEMA_ADDR_LOW__ADDR_26_3__SHIFT 0x0
+#define UVD_SEMA_ADDR_LOW__ADDR_26_3_MASK 0x00FFFFFFL
+//UVD_SEMA_ADDR_HIGH
+#define UVD_SEMA_ADDR_HIGH__ADDR_47_27__SHIFT 0x0
+#define UVD_SEMA_ADDR_HIGH__ADDR_47_27_MASK 0x001FFFFFL
+//UVD_ENGINE_CNTL
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1
+#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE__SHIFT 0x2
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L
+#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE_MASK 0x00000004L
+//UVD_SEMA_TIMEOUT_STATUS
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x0
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x1
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x2
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x3
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L
+#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L
+//UVD_SEMA_CNTL
+#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x0
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x1
+#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L
+#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L
+//UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x0
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x1
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001FFFFEL
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+//UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x0
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x1
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001FFFFEL
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+//UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x1
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001FFFFEL
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L
+//UVD_JOB_START
+#define UVD_JOB_START__JOB_START__SHIFT 0x0
+#define UVD_JOB_START__JOB_START_MASK 0x00000001L
+//UVD_RBC_BUF_STATUS
+#define UVD_RBC_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_RBC_BUF_STATUS__IB_BUF_VALID__SHIFT 0x8
+#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x13
+#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x16
+#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x19
+#define UVD_RBC_BUF_STATUS__RB_BUF_VALID_MASK 0x000000FFL
+#define UVD_RBC_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FF00L
+#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x00070000L
+#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x00380000L
+#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x01C00000L
+#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x0E000000L
+//UVD_RBC_SWAP_CNTL
+#define UVD_RBC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_RBC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_RBC_SWAP_CNTL__RB_RPTR_MC_SWAP__SHIFT 0x4
+#define UVD_RBC_SWAP_CNTL__RB_WR_MC_SWAP__SHIFT 0x1a
+#define UVD_RBC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_RBC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_RBC_SWAP_CNTL__RB_RPTR_MC_SWAP_MASK 0x00000030L
+#define UVD_RBC_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0C000000L
+
+
+// addressBlock: aid_uvd0_lmi_adpdec
+//UVD_LMI_RE_64BIT_BAR_LOW
+#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RE_64BIT_BAR_HIGH
+#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_IT_64BIT_BAR_LOW
+#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_IT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_IT_64BIT_BAR_HIGH
+#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_IT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MP_64BIT_BAR_LOW
+#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MP_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MP_64BIT_BAR_HIGH
+#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MP_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_CM_64BIT_BAR_LOW
+#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_CM_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_CM_64BIT_BAR_HIGH
+#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_CM_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_DB_64BIT_BAR_LOW
+#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_DB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_DB_64BIT_BAR_HIGH
+#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_DB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_DBW_64BIT_BAR_LOW
+#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_IDCT_64BIT_BAR_LOW
+#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_IDCT_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_IDCT_64BIT_BAR_HIGH
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_IDCT_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S0_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_S0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S0_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_S0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S1_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_S1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_S1_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_S1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPRD_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MPC_64BIT_BAR_LOW
+#define UVD_LMI_MPC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MPC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MPC_64BIT_BAR_HIGH
+#define UVD_LMI_MPC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MPC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_RB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_LOW
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_RBC_IB_64BIT_BAR_HIGH
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_LOW
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_LBSI_64BIT_BAR_HIGH
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_CENC_64BIT_BAR_LOW
+#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_CENC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_CENC_64BIT_BAR_HIGH
+#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_CENC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SRE_64BIT_BAR_LOW
+#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_SRE_64BIT_BAR_HIGH
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SRE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_GPGPU_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CURR_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_REF_64BIT_BAR_LOW
+#define UVD_LMI_MIF_REF_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_REF_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_REF_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_REF_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_REF_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_LOW
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_DBW_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_DBW_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_CM_COLOC_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSP3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_LOW
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_BSD4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_SCLR2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_SPH_64BIT_BAR_HIGH
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_SPH_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_IMAGEPASTE_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_PRIVACY_CHROMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_ADP_ATOMIC_CONFIG
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE__SHIFT 0x0
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE__SHIFT 0x4
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE__SHIFT 0x8
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE__SHIFT 0xc
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG__SHIFT 0x10
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER0_WR_CACHE_MASK 0x0000000FL
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER1_WR_CACHE_MASK 0x000000F0L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER2_WR_CACHE_MASK 0x00000F00L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_USER3_WR_CACHE_MASK 0x0000F000L
+#define UVD_ADP_ATOMIC_CONFIG__ATOMIC_RD_URG_MASK 0x000F0000L
+//UVD_LMI_ARB_CTRL2
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14
+#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL
+#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L
+#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L
+#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L
+//UVD_LMI_VCPU_CACHE_VMIDS_MULTI
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L
+#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L
+//UVD_LMI_VCPU_NC_VMIDS_MULTI
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L
+#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L
+//UVD_LMI_LAT_CTRL
+#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_LMI_LAT_CNTR
+#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_LMI_AVG_LAT_CNTR
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_LMI_SPH
+#define UVD_LMI_SPH__ADDR__SHIFT 0x0
+#define UVD_LMI_SPH__STS__SHIFT 0x1c
+#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e
+#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f
+#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL
+#define UVD_LMI_SPH__STS_MASK 0x30000000L
+#define UVD_LMI_SPH__STS_VALID_MASK 0x40000000L
+#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L
+//UVD_LMI_VCPU_CACHE_VMID
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_LMI_CTRL2
+#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0
+#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3
+#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7
+#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19
+#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a
+#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b
+#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L
+#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L
+#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L
+#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L
+#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L
+#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L
+#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L
+#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L
+#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L
+#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L
+#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L
+#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L
+#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L
+#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L
+#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L
+#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L
+//UVD_LMI_URGENT_CTRL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL
+#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L
+#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L
+#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L
+#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L
+//UVD_LMI_CTRL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8
+#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb
+#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd
+#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe
+#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL__SHIFT 0x14
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b
+#define UVD_LMI_CTRL__MC_BLK_RST__SHIFT 0x1c
+#define UVD_LMI_CTRL__UMC_BLK_RST__SHIFT 0x1d
+#define UVD_LMI_CTRL__RFU__SHIFT 0x1e
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL
+#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L
+#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L
+#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L
+#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L
+#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L
+#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L
+#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L
+#define UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK 0x00100000L
+#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L
+#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L
+#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L
+#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L
+#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L
+#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L
+#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L
+#define UVD_LMI_CTRL__MC_BLK_RST_MASK 0x10000000L
+#define UVD_LMI_CTRL__UMC_BLK_RST_MASK 0x20000000L
+#define UVD_LMI_CTRL__RFU_MASK 0xC0000000L
+//UVD_LMI_STATUS
+#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0
+#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3
+#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7
+#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9
+#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa
+#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15
+#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 0x16
+#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L
+#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L
+#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L
+#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L
+#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L
+#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L
+#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L
+#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L
+#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L
+#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L
+#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L
+#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L
+#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L
+//UVD_LMI_PERFMON_CTRL
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_LMI_PERFMON_COUNT_LO
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_LMI_PERFMON_COUNT_HI
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_LMI_ADP_SWAP_CNTL
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP__SHIFT 0x6
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP__SHIFT 0x8
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP__SHIFT 0xa
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP__SHIFT 0x10
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP__SHIFT 0x12
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP__SHIFT 0x14
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP__SHIFT 0x18
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP__SHIFT 0x1c
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP__SHIFT 0x1e
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_R_MC_SWAP_MASK 0x000000C0L
+#define UVD_LMI_ADP_SWAP_CNTL__VCPU_W_MC_SWAP_MASK 0x00000300L
+#define UVD_LMI_ADP_SWAP_CNTL__CM_MC_SWAP_MASK 0x00000C00L
+#define UVD_LMI_ADP_SWAP_CNTL__IT_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_R_MC_SWAP_MASK 0x0000C000L
+#define UVD_LMI_ADP_SWAP_CNTL__DB_W_MC_SWAP_MASK 0x00030000L
+#define UVD_LMI_ADP_SWAP_CNTL__CSM_MC_SWAP_MASK 0x000C0000L
+#define UVD_LMI_ADP_SWAP_CNTL__PREF_MC_SWAP_MASK 0x00300000L
+#define UVD_LMI_ADP_SWAP_CNTL__DBW_MC_SWAP_MASK 0x03000000L
+#define UVD_LMI_ADP_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L
+#define UVD_LMI_ADP_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L
+//UVD_LMI_RBC_RB_VMID
+#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL
+//UVD_LMI_RBC_IB_VMID
+#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0
+#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL
+//UVD_LMI_MC_CREDITS
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18
+#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL
+#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L
+#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L
+#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L
+//UVD_LMI_ADP_IND_INDEX
+#define UVD_LMI_ADP_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_LMI_ADP_IND_INDEX__INDEX_MASK 0x00001FFFL
+//UVD_LMI_ADP_IND_DATA
+#define UVD_LMI_ADP_IND_DATA__DATA__SHIFT 0x0
+#define UVD_LMI_ADP_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//UVD_LMI_ADP_PF_EN
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN__SHIFT 0x0
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN__SHIFT 0x1
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN__SHIFT 0x2
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE0_PF_EN_MASK 0x00000001L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE1_PF_EN_MASK 0x00000002L
+#define UVD_LMI_ADP_PF_EN__VCPU_CACHE2_PF_EN_MASK 0x00000004L
+//UVD_LMI_PREF_CTRL
+#define UVD_LMI_PREF_CTRL__PREF_RST__SHIFT 0x0
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS__SHIFT 0x1
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB__SHIFT 0x2
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE__SHIFT 0x3
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE__SHIFT 0x4
+#define UVD_LMI_PREF_CTRL__PREF_SIZE__SHIFT 0x13
+#define UVD_LMI_PREF_CTRL__PREF_RST_MASK 0x00000001L
+#define UVD_LMI_PREF_CTRL__PREF_BUSY_STATUS_MASK 0x00000002L
+#define UVD_LMI_PREF_CTRL__PREF_WSTRB_MASK 0x00000004L
+#define UVD_LMI_PREF_CTRL__PREF_WRITE_SIZE_MASK 0x00000008L
+#define UVD_LMI_PREF_CTRL__PREF_STEP_SIZE_MASK 0x00000070L
+#define UVD_LMI_PREF_CTRL__PREF_SIZE_MASK 0xFFF80000L
+//UVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW
+#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH
+#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MIF_REF_LUMA_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//VCN_RAS_CNTL
+#define VCN_RAS_CNTL__VCPU_VCODEC_IH_EN__SHIFT 0x0
+#define VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN__SHIFT 0x4
+#define VCN_RAS_CNTL__VCPU_VCODEC_REARM__SHIFT 0x8
+#define VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN__SHIFT 0xc
+#define VCN_RAS_CNTL__VCPU_VCODEC_READY__SHIFT 0x10
+#define VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK 0x00000001L
+#define VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK 0x00000010L
+#define VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK 0x00000100L
+#define VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK 0x00001000L
+#define VCN_RAS_CNTL__VCPU_VCODEC_READY_MASK 0x00010000L
+
+
+// addressBlock: aid_uvd0_uvd_jpeg0_jpegnpdec
+//UVD_JPEG_CNTL
+#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1
+#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2
+#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8
+#define UVD_JPEG_CNTL__FORMAT_CONV_EN__SHIFT 0x10
+#define UVD_JPEG_CNTL__VUP_MODE__SHIFT 0x11
+#define UVD_JPEG_CNTL__FC_TIMEOUT_EN__SHIFT 0x12
+#define UVD_JPEG_CNTL__ROI_CROP_EN__SHIFT 0x18
+#define UVD_JPEG_CNTL__ROI_CROP_EARLY_DECODE_STOP_DIS__SHIFT 0x19
+#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L
+#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L
+#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L
+#define UVD_JPEG_CNTL__FORMAT_CONV_EN_MASK 0x00010000L
+#define UVD_JPEG_CNTL__VUP_MODE_MASK 0x00020000L
+#define UVD_JPEG_CNTL__FC_TIMEOUT_EN_MASK 0x00040000L
+#define UVD_JPEG_CNTL__ROI_CROP_EN_MASK 0x01000000L
+#define UVD_JPEG_CNTL__ROI_CROP_EARLY_DECODE_STOP_DIS_MASK 0x02000000L
+//UVD_JPEG_RB_BASE
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0
+#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL
+#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L
+//UVD_JPEG_RB_WPTR
+#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_RPTR
+#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_SIZE
+#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L
+//UVD_JPEG_DEC_CNT
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT__SHIFT 0x0
+#define UVD_JPEG_DEC_CNT__DECODE_COUNT_MASK 0xFFFFFFFFL
+//UVD_JPEG_SPS_INFO
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_SPS_INFO__PIC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_SPS_INFO__PIC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_SPS1_INFO
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC__SHIFT 0x0
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT__SHIFT 0x3
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422__SHIFT 0x4
+#define UVD_JPEG_SPS1_INFO__CHROMA_FORMAT_IDC_MASK 0x00000007L
+#define UVD_JPEG_SPS1_INFO__YUV422_SUBFORMAT_MASK 0x00000008L
+#define UVD_JPEG_SPS1_INFO__OUT_FMT_422_MASK 0x00000010L
+//UVD_JPEG_RE_TIMER
+#define UVD_JPEG_RE_TIMER__TIMER_OUT__SHIFT 0x0
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN__SHIFT 0x10
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_MASK 0x000000FFL
+#define UVD_JPEG_RE_TIMER__TIMER_OUT_EN_MASK 0x00010000L
+//UVD_JPEG_DEC_SCRATCH0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_INT_EN
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7
+#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8
+#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9
+#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd
+#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf
+#define UVD_JPEG_INT_EN__FC_TIMEOUT_ERR_EN__SHIFT 0x10
+#define UVD_JPEG_INT_EN__FC_FMT_ERR_EN__SHIFT 0x11
+#define UVD_JPEG_INT_EN__FC_SRC_ERR_EN__SHIFT 0x12
+#define UVD_JPEG_INT_EN__CROP_SIZE_ERR_EN__SHIFT 0x13
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L
+#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L
+#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L
+#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L
+#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L
+#define UVD_JPEG_INT_EN__FC_TIMEOUT_ERR_EN_MASK 0x00010000L
+#define UVD_JPEG_INT_EN__FC_FMT_ERR_EN_MASK 0x00020000L
+#define UVD_JPEG_INT_EN__FC_SRC_ERR_EN_MASK 0x00040000L
+#define UVD_JPEG_INT_EN__CROP_SIZE_ERR_EN_MASK 0x00080000L
+//UVD_JPEG_INT_STAT
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9
+#define UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__FC_TIMEOUT_ERR_INT__SHIFT 0x10
+#define UVD_JPEG_INT_STAT__FC_FMT_ERR_INT__SHIFT 0x11
+#define UVD_JPEG_INT_STAT__FC_SRC_ERR_INT__SHIFT 0x12
+#define UVD_JPEG_INT_STAT__CROP_SIZE_ERR_INT__SHIFT 0x13
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+#define UVD_JPEG_INT_STAT__FC_TIMEOUT_ERR_INT_MASK 0x00010000L
+#define UVD_JPEG_INT_STAT__FC_FMT_ERR_INT_MASK 0x00020000L
+#define UVD_JPEG_INT_STAT__FC_SRC_ERR_INT_MASK 0x00040000L
+#define UVD_JPEG_INT_STAT__CROP_SIZE_ERR_INT_MASK 0x00080000L
+//UVD_JPEG_TIER_CNTL0
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC__SHIFT 0x8
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC__SHIFT 0xb
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC__SHIFT 0x11
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC__SHIFT 0x14
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC__SHIFT 0x17
+#define UVD_JPEG_TIER_CNTL0__Y_TQ__SHIFT 0x1a
+#define UVD_JPEG_TIER_CNTL0__U_TQ__SHIFT 0x1c
+#define UVD_JPEG_TIER_CNTL0__V_TQ__SHIFT 0x1e
+#define UVD_JPEG_TIER_CNTL0__TIER_SEL_MASK 0x00000003L
+#define UVD_JPEG_TIER_CNTL0__Y_COMP_ID_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL0__U_COMP_ID_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL0__V_COMP_ID_MASK 0x000000C0L
+#define UVD_JPEG_TIER_CNTL0__Y_H_SAMP_FAC_MASK 0x00000700L
+#define UVD_JPEG_TIER_CNTL0__Y_V_SAMP_FAC_MASK 0x00003800L
+#define UVD_JPEG_TIER_CNTL0__U_H_SAMP_FAC_MASK 0x0001C000L
+#define UVD_JPEG_TIER_CNTL0__U_V_SAMP_FAC_MASK 0x000E0000L
+#define UVD_JPEG_TIER_CNTL0__V_H_SAMP_FAC_MASK 0x00700000L
+#define UVD_JPEG_TIER_CNTL0__V_V_SAMP_FAC_MASK 0x03800000L
+#define UVD_JPEG_TIER_CNTL0__Y_TQ_MASK 0x0C000000L
+#define UVD_JPEG_TIER_CNTL0__U_TQ_MASK 0x30000000L
+#define UVD_JPEG_TIER_CNTL0__V_TQ_MASK 0xC0000000L
+//UVD_JPEG_TIER_CNTL1
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL1__SRC_WIDTH_MASK 0x0000FFFFL
+#define UVD_JPEG_TIER_CNTL1__SRC_HEIGHT_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_CNTL2
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL__SHIFT 0x0
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE__SHIFT 0x1
+#define UVD_JPEG_TIER_CNTL2__TQ__SHIFT 0x2
+#define UVD_JPEG_TIER_CNTL2__TH__SHIFT 0x4
+#define UVD_JPEG_TIER_CNTL2__TC__SHIFT 0x6
+#define UVD_JPEG_TIER_CNTL2__TD__SHIFT 0x7
+#define UVD_JPEG_TIER_CNTL2__TA__SHIFT 0xa
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN__SHIFT 0xe
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL__SHIFT 0x10
+#define UVD_JPEG_TIER_CNTL2__TBL_ECS_SEL_MASK 0x00000001L
+#define UVD_JPEG_TIER_CNTL2__TBL_TYPE_MASK 0x00000002L
+#define UVD_JPEG_TIER_CNTL2__TQ_MASK 0x0000000CL
+#define UVD_JPEG_TIER_CNTL2__TH_MASK 0x00000030L
+#define UVD_JPEG_TIER_CNTL2__TC_MASK 0x00000040L
+#define UVD_JPEG_TIER_CNTL2__TD_MASK 0x00000380L
+#define UVD_JPEG_TIER_CNTL2__TA_MASK 0x00001C00L
+#define UVD_JPEG_TIER_CNTL2__TIER2_HTBL_CNTLEN_MASK 0x00004000L
+#define UVD_JPEG_TIER_CNTL2__DRI_VAL_MASK 0xFFFF0000L
+//UVD_JPEG_TIER_STATUS
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE__SHIFT 0x0
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE__SHIFT 0x1
+#define UVD_JPEG_TIER_STATUS__BSI_FETCH_DONE_MASK 0x00000001L
+#define UVD_JPEG_TIER_STATUS__DECODE_DONE_MASK 0x00000002L
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_sclk0_jpegnpsclkdec
+//UVD_JPEG_OUTBUF_CNTL
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN__SHIFT 0x2
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX__SHIFT 0x6
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT__SHIFT 0x7
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER__SHIFT 0x9
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK__SHIFT 0x10
+#define UVD_JPEG_OUTBUF_CNTL__OUTBUF_CNT_MASK 0x00000003L
+#define UVD_JPEG_OUTBUF_CNTL__HGT_ALIGN_MASK 0x00000004L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_DECODE_DONE_FIX_MASK 0x00000040L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_MAX_CNT_MASK 0x00000180L
+#define UVD_JPEG_OUTBUF_CNTL__JPEG0_WR_COMB_TIMER_MASK 0x00001E00L
+#define UVD_JPEG_OUTBUF_CNTL__DIS_OBUF_AVAIL_CHECK_MASK 0x00010000L
+//UVD_JPEG_OUTBUF_WPTR
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_WPTR__OUTBUF_WPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_OUTBUF_RPTR
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR__SHIFT 0x0
+#define UVD_JPEG_OUTBUF_RPTR__OUTBUF_RPTR_MASK 0xFFFFFFFFL
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_UV_GFX8_TILING_SURFACE
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L
+#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L
+//JPEG_DEC_GFX8_ADDR_CONFIG
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//JPEG_DEC_Y_GFX10_TILING_SURFACE
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_UV_GFX10_TILING_SURFACE
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0
+#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL
+//JPEG_DEC_GFX10_ADDR_CONFIG
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//JPEG_DEC_ADDR_MODE
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L
+#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL
+#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L
+//UVD_JPEG_OUTPUT_XY
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X__SHIFT 0x0
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y__SHIFT 0x10
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_X_MASK 0x00003FFFL
+#define UVD_JPEG_OUTPUT_XY__OUTPUT_Y_MASK 0x3FFF0000L
+//UVD_JPEG_GPCOM_CMD
+#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1
+#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL
+//UVD_JPEG_GPCOM_DATA0
+#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL
+//UVD_JPEG_GPCOM_DATA1
+#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0
+#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_JPEG_SCRATCH1
+#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0
+#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL
+//UVD_JPEG_DEC_SOFT_RST
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10
+#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L
+#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L
+
+// addressBlock: aid_uvd0_vcn_edcc_dec
+//VCN_UE_ERR_STATUS_LO_VIDD
+#define VCN_UE_ERR_STATUS_LO_VIDD__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_VIDD__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_VIDD__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_VIDD__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_VIDD__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_VIDD__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_VIDD__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_VIDD__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_VIDD
+#define VCN_UE_ERR_STATUS_HI_VIDD__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_VIDD__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_VIDD__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_VIDD__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_VIDD__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_VIDD__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_VIDD__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_VIDD__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_VIDD__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_VIDD__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_VIDD__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_VIDD__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_VIDD__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_VIDV
+#define VCN_UE_ERR_STATUS_LO_VIDV__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_VIDV__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_VIDV__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_VIDV__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_VIDV__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_VIDV__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_VIDV__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_VIDV__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_VIDV
+#define VCN_UE_ERR_STATUS_HI_VIDV__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_VIDV__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_VIDV__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_VIDV__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_VIDV__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_VIDV__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_VIDV__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_VIDV__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_VIDV__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_VIDV__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_VIDV__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_VIDV__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_VIDV__Err_clr_MASK 0x80000000L
+//VCN_CE_ERR_STATUS_LO_MMSCHD
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address_Valid_Flag__SHIFT 0x1
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address__SHIFT 0x2
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Memory_id__SHIFT 0x18
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Address_MASK 0x00FFFFFCL
+#define VCN_CE_ERR_STATUS_LO_MMSCHD__Memory_id_MASK 0xFF000000L
+//VCN_CE_ERR_STATUS_HI_MMSCHD
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__ECC__SHIFT 0x0
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Other__SHIFT 0x1
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info__SHIFT 0x3
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__CE_Cnt__SHIFT 0x17
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Poison__SHIFT 0x1c
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__RESERVED__SHIFT 0x1d
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_clr__SHIFT 0x1f
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__ECC_MASK 0x00000001L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Other_MASK 0x00000002L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_Info_MASK 0x007FFFF8L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__CE_Cnt_MASK 0x03800000L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Poison_MASK 0x10000000L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__RESERVED_MASK 0x60000000L
+#define VCN_CE_ERR_STATUS_HI_MMSCHD__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG0S
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG0S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG0S
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG0S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG0D
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG0D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG0D
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG0D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG1S
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG1S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG1S
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG1S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG1D
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG1D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG1D
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG1D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG2S
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG2S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG2S
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG2S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG2D
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG2D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG2D
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG2D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG3S
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG3S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG3S
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG3S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG3D
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG3D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG3D
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG3D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG4S
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG4S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG4S
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG4S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG4D
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG4D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG4D
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG4D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG5S
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG5S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG5S
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG5S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG5D
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG5D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG5D
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG5D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG6S
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG6S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG6S
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG6S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG6D
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG6D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG6D
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG6D__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG7S
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG7S__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG7S
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG7S__Err_clr_MASK 0x80000000L
+//VCN_UE_ERR_STATUS_LO_JPEG7D
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Err_Status_Valid_Flag__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address_Valid_Flag__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Memory_id__SHIFT 0x18
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Err_Status_Valid_Flag_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address_Valid_Flag_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Address_MASK 0x00FFFFFCL
+#define VCN_UE_ERR_STATUS_LO_JPEG7D__Memory_id_MASK 0xFF000000L
+//VCN_UE_ERR_STATUS_HI_JPEG7D
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__ECC__SHIFT 0x0
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Parity__SHIFT 0x1
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info_Valid_Flag__SHIFT 0x2
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info__SHIFT 0x3
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__UE_Cnt__SHIFT 0x17
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__FED_Cnt__SHIFT 0x1a
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__RESERVED__SHIFT 0x1d
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_clr__SHIFT 0x1f
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__ECC_MASK 0x00000001L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Parity_MASK 0x00000002L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info_Valid_Flag_MASK 0x00000004L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_Info_MASK 0x007FFFF8L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__UE_Cnt_MASK 0x03800000L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__FED_Cnt_MASK 0x1C000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__RESERVED_MASK 0x60000000L
+#define VCN_UE_ERR_STATUS_HI_JPEG7D__Err_clr_MASK 0x80000000L
+
+// addressBlock: aid_uvd0_uvd_jrbc0_uvd_jrbc_dec
+//UVD_JRBC0_UVD_JRBC_RB_WPTR
+#define UVD_JRBC0_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_RB_CNTL
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC0_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC0_UVD_JRBC_IB_SIZE
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC0_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC0_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC0_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC0_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC0_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC0_UVD_JRBC_STATUS
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC0_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC0_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC0_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC0_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC0_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC0_UVD_JRBC_RB_RPTR
+#define UVD_JRBC0_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC0_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC0_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC0_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC0_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC0_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC0_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC0_UVD_JRBC_RB_SIZE
+#define UVD_JRBC0_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC0_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC0_UVD_JRBC_SCRATCH0
+#define UVD_JRBC0_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC0_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jmi0_uvd_jmi_dec
+//UVD_JMI0_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI0_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI0_UVD_LMI_JRBC_CTRL
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI0_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI0_UVD_LMI_JPEG_CTRL
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI0_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI0_JPEG_LMI_DROP
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI0_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI0_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI0_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI0_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI0_UVD_LMI_JPEG_VMID
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI0_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI0_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI0_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI0_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI0_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI0_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi_common_dec
+//UVD_JADP_MCIF_URGENT_CTRL
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK__SHIFT 0x0
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK__SHIFT 0x6
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER__SHIFT 0xb
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP__SHIFT 0x11
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP__SHIFT 0x15
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN__SHIFT 0x19
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN__SHIFT 0x1a
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_WATERMARK_MASK 0x0000003FL
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_WATERMARK_MASK 0x000007C0L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_RD_URGENT_TIMER_MASK 0x0001F800L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_URGENT_PROG_STEP_MASK 0x001E0000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_URGENT_PROG_STEP_MASK 0x01E00000L
+#define UVD_JADP_MCIF_URGENT_CTRL__WR_QOS_EN_MASK 0x02000000L
+#define UVD_JADP_MCIF_URGENT_CTRL__RD_QOS_EN_MASK 0x04000000L
+//UVD_JMI_URGENT_CTRL
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x4
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x10
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0x14
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x000000F0L
+#define UVD_JMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00010000L
+#define UVD_JMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00F00000L
+//UVD_JMI_CTRL
+#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0
+#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10
+#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L
+#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L
+#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L
+#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L
+#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L
+//JPEG_MEMCHECK_CLAMPING_CNTL
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_CLAMPING_CNTL__CLAMP_TO_SAFE_ADDR_EN_MASK 0x00000001L
+//JPEG_MEMCHECK_SAFE_ADDR
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR__MEMCHECK_SAFE_ADDR_MASK 0xFFFFFFFFL
+//JPEG_MEMCHECK_SAFE_ADDR_64BIT
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT__SHIFT 0x0
+#define JPEG_MEMCHECK_SAFE_ADDR_64BIT__MEMCHECK_SAFE_ADDR_64BIT_MASK 0xFFFFFFFFL
+//UVD_JMI_LAT_CTRL
+#define UVD_JMI_LAT_CTRL__SCALE__SHIFT 0x0
+#define UVD_JMI_LAT_CTRL__MAX_START__SHIFT 0x8
+#define UVD_JMI_LAT_CTRL__MIN_START__SHIFT 0x9
+#define UVD_JMI_LAT_CTRL__AVG_START__SHIFT 0xa
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb
+#define UVD_JMI_LAT_CTRL__SKIP__SHIFT 0x10
+#define UVD_JMI_LAT_CTRL__SCALE_MASK 0x000000FFL
+#define UVD_JMI_LAT_CTRL__MAX_START_MASK 0x00000100L
+#define UVD_JMI_LAT_CTRL__MIN_START_MASK 0x00000200L
+#define UVD_JMI_LAT_CTRL__AVG_START_MASK 0x00000400L
+#define UVD_JMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L
+#define UVD_JMI_LAT_CTRL__SKIP_MASK 0x000F0000L
+//UVD_JMI_LAT_CNTR
+#define UVD_JMI_LAT_CNTR__MAX_LAT__SHIFT 0x0
+#define UVD_JMI_LAT_CNTR__MIN_LAT__SHIFT 0x8
+#define UVD_JMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL
+#define UVD_JMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L
+//UVD_JMI_AVG_LAT_CNTR
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10
+#define UVD_JMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L
+#define UVD_JMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L
+//UVD_JMI_PERFMON_CTRL
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8
+#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L
+#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L
+//UVD_JMI_PERFMON_COUNT_LO
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL
+//UVD_JMI_PERFMON_COUNT_HI
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0
+#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL
+//UVD_JMI_CLEAN_STATUS
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN__SHIFT 0x0
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW__SHIFT 0x1
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW__SHIFT 0x3
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING__SHIFT 0x4
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN__SHIFT 0x8
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_READ_CLEAN__SHIFT 0x9
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_READ_CLEAN__SHIFT 0xa
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_READ_CLEAN__SHIFT 0xb
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_READ_CLEAN__SHIFT 0xc
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_READ_CLEAN__SHIFT 0xd
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_READ_CLEAN__SHIFT 0xe
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_READ_CLEAN__SHIFT 0xf
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN__SHIFT 0x10
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_WRITE_CLEAN__SHIFT 0x11
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_WRITE_CLEAN__SHIFT 0x12
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_WRITE_CLEAN__SHIFT 0x13
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_WRITE_CLEAN__SHIFT 0x14
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_WRITE_CLEAN__SHIFT 0x15
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_WRITE_CLEAN__SHIFT 0x16
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_WRITE_CLEAN__SHIFT 0x17
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_MASK 0x00000001L
+#define UVD_JMI_CLEAN_STATUS__LMI_READ_CLEAN_RAW_MASK 0x00000002L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_JMI_CLEAN_STATUS__LMI_WRITE_CLEAN_RAW_MASK 0x00000008L
+#define UVD_JMI_CLEAN_STATUS__MC_WRITE_PENDING_MASK 0x00000010L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_READ_CLEAN_MASK 0x00000100L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_READ_CLEAN_MASK 0x00000200L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_READ_CLEAN_MASK 0x00000400L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_READ_CLEAN_MASK 0x00000800L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_READ_CLEAN_MASK 0x00001000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_READ_CLEAN_MASK 0x00002000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_READ_CLEAN_MASK 0x00004000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_READ_CLEAN_MASK 0x00008000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE0_WRITE_CLEAN_MASK 0x00010000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE1_WRITE_CLEAN_MASK 0x00020000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE2_WRITE_CLEAN_MASK 0x00040000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE3_WRITE_CLEAN_MASK 0x00080000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE4_WRITE_CLEAN_MASK 0x00100000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE5_WRITE_CLEAN_MASK 0x00200000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE6_WRITE_CLEAN_MASK 0x00400000L
+#define UVD_JMI_CLEAN_STATUS__DJPEG_CORE7_WRITE_CLEAN_MASK 0x00800000L
+//UVD_JMI_CNTL
+#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8
+#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L
+#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L
+
+
+// addressBlock: aid_uvd0_uvd_jpeg_common_dec
+//JPEG_SOFT_RESET_STATUS
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS__SHIFT 0x0
+#define JPEG_SOFT_RESET_STATUS__JPEG1_DEC_RESET_STATUS__SHIFT 0x1
+#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS__SHIFT 0x2
+#define JPEG_SOFT_RESET_STATUS__JPEG3_DEC_RESET_STATUS__SHIFT 0x3
+#define JPEG_SOFT_RESET_STATUS__JPEG4_DEC_RESET_STATUS__SHIFT 0x4
+#define JPEG_SOFT_RESET_STATUS__JPEG5_DEC_RESET_STATUS__SHIFT 0x5
+#define JPEG_SOFT_RESET_STATUS__JPEG6_DEC_RESET_STATUS__SHIFT 0x6
+#define JPEG_SOFT_RESET_STATUS__JPEG7_DEC_RESET_STATUS__SHIFT 0x7
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS__SHIFT 0x8
+#define JPEG_SOFT_RESET_STATUS__DJRBC1_RESET_STATUS__SHIFT 0x9
+#define JPEG_SOFT_RESET_STATUS__DJRBC2_RESET_STATUS__SHIFT 0xa
+#define JPEG_SOFT_RESET_STATUS__DJRBC3_RESET_STATUS__SHIFT 0xb
+#define JPEG_SOFT_RESET_STATUS__DJRBC4_RESET_STATUS__SHIFT 0xc
+#define JPEG_SOFT_RESET_STATUS__DJRBC5_RESET_STATUS__SHIFT 0xd
+#define JPEG_SOFT_RESET_STATUS__DJRBC6_RESET_STATUS__SHIFT 0xe
+#define JPEG_SOFT_RESET_STATUS__DJRBC7_RESET_STATUS__SHIFT 0xf
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x11
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x12
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x18
+#define JPEG_SOFT_RESET_STATUS__JPEG0_DEC_RESET_STATUS_MASK 0x00000001L
+#define JPEG_SOFT_RESET_STATUS__JPEG1_DEC_RESET_STATUS_MASK 0x00000002L
+#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS_MASK 0x00000004L
+#define JPEG_SOFT_RESET_STATUS__JPEG3_DEC_RESET_STATUS_MASK 0x00000008L
+#define JPEG_SOFT_RESET_STATUS__JPEG4_DEC_RESET_STATUS_MASK 0x00000010L
+#define JPEG_SOFT_RESET_STATUS__JPEG5_DEC_RESET_STATUS_MASK 0x00000020L
+#define JPEG_SOFT_RESET_STATUS__JPEG6_DEC_RESET_STATUS_MASK 0x00000040L
+#define JPEG_SOFT_RESET_STATUS__JPEG7_DEC_RESET_STATUS_MASK 0x00000080L
+#define JPEG_SOFT_RESET_STATUS__DJRBC0_RESET_STATUS_MASK 0x00000100L
+#define JPEG_SOFT_RESET_STATUS__DJRBC1_RESET_STATUS_MASK 0x00000200L
+#define JPEG_SOFT_RESET_STATUS__DJRBC2_RESET_STATUS_MASK 0x00000400L
+#define JPEG_SOFT_RESET_STATUS__DJRBC3_RESET_STATUS_MASK 0x00000800L
+#define JPEG_SOFT_RESET_STATUS__DJRBC4_RESET_STATUS_MASK 0x00001000L
+#define JPEG_SOFT_RESET_STATUS__DJRBC5_RESET_STATUS_MASK 0x00002000L
+#define JPEG_SOFT_RESET_STATUS__DJRBC6_RESET_STATUS_MASK 0x00004000L
+#define JPEG_SOFT_RESET_STATUS__DJRBC7_RESET_STATUS_MASK 0x00008000L
+#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00020000L
+#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00040000L
+#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x01000000L
+//JPEG_SYS_INT_EN
+#define JPEG_SYS_INT_EN__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_EN__DJPEG1_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN__DJPEG2_CORE__SHIFT 0x2
+#define JPEG_SYS_INT_EN__DJPEG3_CORE__SHIFT 0x3
+#define JPEG_SYS_INT_EN__DJPEG4_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_EN__DJPEG5_CORE__SHIFT 0x5
+#define JPEG_SYS_INT_EN__DJPEG6_CORE__SHIFT 0x6
+#define JPEG_SYS_INT_EN__DJPEG7_CORE__SHIFT 0x7
+#define JPEG_SYS_INT_EN__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_EN__DJRBC1__SHIFT 0x9
+#define JPEG_SYS_INT_EN__DJRBC2__SHIFT 0xa
+#define JPEG_SYS_INT_EN__DJRBC3__SHIFT 0xb
+#define JPEG_SYS_INT_EN__DJRBC4__SHIFT 0xc
+#define JPEG_SYS_INT_EN__DJRBC5__SHIFT 0xd
+#define JPEG_SYS_INT_EN__DJRBC6__SHIFT 0xe
+#define JPEG_SYS_INT_EN__DJRBC7__SHIFT 0xf
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_EN__DJPEG1_PF_RPT__SHIFT 0x11
+#define JPEG_SYS_INT_EN__DJPEG2_PF_RPT__SHIFT 0x12
+#define JPEG_SYS_INT_EN__DJPEG3_PF_RPT__SHIFT 0x13
+#define JPEG_SYS_INT_EN__DJPEG4_PF_RPT__SHIFT 0x14
+#define JPEG_SYS_INT_EN__DJPEG5_PF_RPT__SHIFT 0x15
+#define JPEG_SYS_INT_EN__DJPEG6_PF_RPT__SHIFT 0x16
+#define JPEG_SYS_INT_EN__DJPEG7_PF_RPT__SHIFT 0x17
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_EN__DJPEG1_RAS_CNTL__SHIFT 0x19
+#define JPEG_SYS_INT_EN__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_EN__DJPEG1_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN__DJPEG2_CORE_MASK 0x00000004L
+#define JPEG_SYS_INT_EN__DJPEG3_CORE_MASK 0x00000008L
+#define JPEG_SYS_INT_EN__DJPEG4_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_EN__DJPEG5_CORE_MASK 0x00000020L
+#define JPEG_SYS_INT_EN__DJPEG6_CORE_MASK 0x00000040L
+#define JPEG_SYS_INT_EN__DJPEG7_CORE_MASK 0x00000080L
+#define JPEG_SYS_INT_EN__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_EN__DJRBC1_MASK 0x00000200L
+#define JPEG_SYS_INT_EN__DJRBC2_MASK 0x00000400L
+#define JPEG_SYS_INT_EN__DJRBC3_MASK 0x00000800L
+#define JPEG_SYS_INT_EN__DJRBC4_MASK 0x00001000L
+#define JPEG_SYS_INT_EN__DJRBC5_MASK 0x00002000L
+#define JPEG_SYS_INT_EN__DJRBC6_MASK 0x00004000L
+#define JPEG_SYS_INT_EN__DJRBC7_MASK 0x00008000L
+#define JPEG_SYS_INT_EN__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_EN__DJPEG1_PF_RPT_MASK 0x00020000L
+#define JPEG_SYS_INT_EN__DJPEG2_PF_RPT_MASK 0x00040000L
+#define JPEG_SYS_INT_EN__DJPEG3_PF_RPT_MASK 0x00080000L
+#define JPEG_SYS_INT_EN__DJPEG4_PF_RPT_MASK 0x00100000L
+#define JPEG_SYS_INT_EN__DJPEG5_PF_RPT_MASK 0x00200000L
+#define JPEG_SYS_INT_EN__DJPEG6_PF_RPT_MASK 0x00400000L
+#define JPEG_SYS_INT_EN__DJPEG7_PF_RPT_MASK 0x00800000L
+#define JPEG_SYS_INT_EN__DJPEG0_RAS_CNTL_MASK 0x01000000L
+#define JPEG_SYS_INT_EN__DJPEG1_RAS_CNTL_MASK 0x02000000L
+//JPEG_SYS_INT_EN1
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_EN1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_EN1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_EN1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_EN1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_EN1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_EN1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_STATUS
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS__DJPEG1_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS__DJPEG2_CORE__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS__DJPEG3_CORE__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS__DJPEG4_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_STATUS__DJPEG5_CORE__SHIFT 0x5
+#define JPEG_SYS_INT_STATUS__DJPEG6_CORE__SHIFT 0x6
+#define JPEG_SYS_INT_STATUS__DJPEG7_CORE__SHIFT 0x7
+#define JPEG_SYS_INT_STATUS__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_STATUS__DJRBC1__SHIFT 0x9
+#define JPEG_SYS_INT_STATUS__DJRBC2__SHIFT 0xa
+#define JPEG_SYS_INT_STATUS__DJRBC3__SHIFT 0xb
+#define JPEG_SYS_INT_STATUS__DJRBC4__SHIFT 0xc
+#define JPEG_SYS_INT_STATUS__DJRBC5__SHIFT 0xd
+#define JPEG_SYS_INT_STATUS__DJRBC6__SHIFT 0xe
+#define JPEG_SYS_INT_STATUS__DJRBC7__SHIFT 0xf
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_STATUS__DJPEG1_PF_RPT__SHIFT 0x11
+#define JPEG_SYS_INT_STATUS__DJPEG2_PF_RPT__SHIFT 0x12
+#define JPEG_SYS_INT_STATUS__DJPEG3_PF_RPT__SHIFT 0x13
+#define JPEG_SYS_INT_STATUS__DJPEG4_PF_RPT__SHIFT 0x14
+#define JPEG_SYS_INT_STATUS__DJPEG5_PF_RPT__SHIFT 0x15
+#define JPEG_SYS_INT_STATUS__DJPEG6_PF_RPT__SHIFT 0x16
+#define JPEG_SYS_INT_STATUS__DJPEG7_PF_RPT__SHIFT 0x17
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_STATUS__DJPEG1_RAS_CNTL__SHIFT 0x19
+#define JPEG_SYS_INT_STATUS__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS__DJPEG1_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS__DJPEG2_CORE_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS__DJPEG3_CORE_MASK 0x00000008L
+#define JPEG_SYS_INT_STATUS__DJPEG4_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_STATUS__DJPEG5_CORE_MASK 0x00000020L
+#define JPEG_SYS_INT_STATUS__DJPEG6_CORE_MASK 0x00000040L
+#define JPEG_SYS_INT_STATUS__DJPEG7_CORE_MASK 0x00000080L
+#define JPEG_SYS_INT_STATUS__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_STATUS__DJRBC1_MASK 0x00000200L
+#define JPEG_SYS_INT_STATUS__DJRBC2_MASK 0x00000400L
+#define JPEG_SYS_INT_STATUS__DJRBC3_MASK 0x00000800L
+#define JPEG_SYS_INT_STATUS__DJRBC4_MASK 0x00001000L
+#define JPEG_SYS_INT_STATUS__DJRBC5_MASK 0x00002000L
+#define JPEG_SYS_INT_STATUS__DJRBC6_MASK 0x00004000L
+#define JPEG_SYS_INT_STATUS__DJRBC7_MASK 0x00008000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_STATUS__DJPEG1_PF_RPT_MASK 0x00020000L
+#define JPEG_SYS_INT_STATUS__DJPEG2_PF_RPT_MASK 0x00040000L
+#define JPEG_SYS_INT_STATUS__DJPEG3_PF_RPT_MASK 0x00080000L
+#define JPEG_SYS_INT_STATUS__DJPEG4_PF_RPT_MASK 0x00100000L
+#define JPEG_SYS_INT_STATUS__DJPEG5_PF_RPT_MASK 0x00200000L
+#define JPEG_SYS_INT_STATUS__DJPEG6_PF_RPT_MASK 0x00400000L
+#define JPEG_SYS_INT_STATUS__DJPEG7_PF_RPT_MASK 0x00800000L
+#define JPEG_SYS_INT_STATUS__DJPEG0_RAS_CNTL_MASK 0x01000000L
+#define JPEG_SYS_INT_STATUS__DJPEG1_RAS_CNTL_MASK 0x02000000L
+//JPEG_SYS_INT_STATUS1
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_STATUS1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_STATUS1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_STATUS1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_STATUS1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_STATUS1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_SYS_INT_ACK
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE__SHIFT 0x0
+#define JPEG_SYS_INT_ACK__DJPEG1_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK__DJPEG2_CORE__SHIFT 0x2
+#define JPEG_SYS_INT_ACK__DJPEG3_CORE__SHIFT 0x3
+#define JPEG_SYS_INT_ACK__DJPEG4_CORE__SHIFT 0x4
+#define JPEG_SYS_INT_ACK__DJPEG5_CORE__SHIFT 0x5
+#define JPEG_SYS_INT_ACK__DJPEG6_CORE__SHIFT 0x6
+#define JPEG_SYS_INT_ACK__DJPEG7_CORE__SHIFT 0x7
+#define JPEG_SYS_INT_ACK__DJRBC0__SHIFT 0x8
+#define JPEG_SYS_INT_ACK__DJRBC1__SHIFT 0x9
+#define JPEG_SYS_INT_ACK__DJRBC2__SHIFT 0xa
+#define JPEG_SYS_INT_ACK__DJRBC3__SHIFT 0xb
+#define JPEG_SYS_INT_ACK__DJRBC4__SHIFT 0xc
+#define JPEG_SYS_INT_ACK__DJRBC5__SHIFT 0xd
+#define JPEG_SYS_INT_ACK__DJRBC6__SHIFT 0xe
+#define JPEG_SYS_INT_ACK__DJRBC7__SHIFT 0xf
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT__SHIFT 0x10
+#define JPEG_SYS_INT_ACK__DJPEG1_PF_RPT__SHIFT 0x11
+#define JPEG_SYS_INT_ACK__DJPEG2_PF_RPT__SHIFT 0x12
+#define JPEG_SYS_INT_ACK__DJPEG3_PF_RPT__SHIFT 0x13
+#define JPEG_SYS_INT_ACK__DJPEG4_PF_RPT__SHIFT 0x14
+#define JPEG_SYS_INT_ACK__DJPEG5_PF_RPT__SHIFT 0x15
+#define JPEG_SYS_INT_ACK__DJPEG6_PF_RPT__SHIFT 0x16
+#define JPEG_SYS_INT_ACK__DJPEG7_PF_RPT__SHIFT 0x17
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL__SHIFT 0x18
+#define JPEG_SYS_INT_ACK__DJPEG1_RAS_CNTL__SHIFT 0x19
+#define JPEG_SYS_INT_ACK__DJPEG0_CORE_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK__DJPEG1_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK__DJPEG2_CORE_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK__DJPEG3_CORE_MASK 0x00000008L
+#define JPEG_SYS_INT_ACK__DJPEG4_CORE_MASK 0x00000010L
+#define JPEG_SYS_INT_ACK__DJPEG5_CORE_MASK 0x00000020L
+#define JPEG_SYS_INT_ACK__DJPEG6_CORE_MASK 0x00000040L
+#define JPEG_SYS_INT_ACK__DJPEG7_CORE_MASK 0x00000080L
+#define JPEG_SYS_INT_ACK__DJRBC0_MASK 0x00000100L
+#define JPEG_SYS_INT_ACK__DJRBC1_MASK 0x00000200L
+#define JPEG_SYS_INT_ACK__DJRBC2_MASK 0x00000400L
+#define JPEG_SYS_INT_ACK__DJRBC3_MASK 0x00000800L
+#define JPEG_SYS_INT_ACK__DJRBC4_MASK 0x00001000L
+#define JPEG_SYS_INT_ACK__DJRBC5_MASK 0x00002000L
+#define JPEG_SYS_INT_ACK__DJRBC6_MASK 0x00004000L
+#define JPEG_SYS_INT_ACK__DJRBC7_MASK 0x00008000L
+#define JPEG_SYS_INT_ACK__DJPEG0_PF_RPT_MASK 0x00010000L
+#define JPEG_SYS_INT_ACK__DJPEG1_PF_RPT_MASK 0x00020000L
+#define JPEG_SYS_INT_ACK__DJPEG2_PF_RPT_MASK 0x00040000L
+#define JPEG_SYS_INT_ACK__DJPEG3_PF_RPT_MASK 0x00080000L
+#define JPEG_SYS_INT_ACK__DJPEG4_PF_RPT_MASK 0x00100000L
+#define JPEG_SYS_INT_ACK__DJPEG5_PF_RPT_MASK 0x00200000L
+#define JPEG_SYS_INT_ACK__DJPEG6_PF_RPT_MASK 0x00400000L
+#define JPEG_SYS_INT_ACK__DJPEG7_PF_RPT_MASK 0x00800000L
+#define JPEG_SYS_INT_ACK__DJPEG0_RAS_CNTL_MASK 0x01000000L
+#define JPEG_SYS_INT_ACK__DJPEG1_RAS_CNTL_MASK 0x02000000L
+//JPEG_SYS_INT_ACK1
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT__SHIFT 0x0
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE__SHIFT 0x1
+#define JPEG_SYS_INT_ACK1__EJRBC__SHIFT 0x2
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL__SHIFT 0x3
+#define JPEG_SYS_INT_ACK1__EJPEG_PF_RPT_MASK 0x00000001L
+#define JPEG_SYS_INT_ACK1__EJPEG_CORE_MASK 0x00000002L
+#define JPEG_SYS_INT_ACK1__EJRBC_MASK 0x00000004L
+#define JPEG_SYS_INT_ACK1__EJPEG_RAS_CNTL_MASK 0x00000008L
+//JPEG_MEMCHECK_SYS_INT_EN
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_RD_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_RD_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_RD_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_RD_ERR_EN__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_RD_ERR_EN__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH1_RD_ERR_EN__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH2_RD_ERR_EN__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH3_RD_ERR_EN__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH4_RD_ERR_EN__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH5_RD_ERR_EN__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH6_RD_ERR_EN__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH7_RD_ERR_EN__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_WR_ERR_EN__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_WR_ERR_EN__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_WR_ERR_EN__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_WR_ERR_EN__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_WR_ERR_EN__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_WR_ERR_EN__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_WR_ERR_EN__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF1_WR_ERR_EN__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF2_WR_ERR_EN__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF3_WR_ERR_EN__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF4_WR_ERR_EN__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF5_WR_ERR_EN__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF6_WR_ERR_EN__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF7_WR_ERR_EN__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_RD_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_RD_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_RD_ERR_EN_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_RD_ERR_EN_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_RD_ERR_EN_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH0_RD_ERR_EN_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH1_RD_ERR_EN_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH2_RD_ERR_EN_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH3_RD_ERR_EN_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH4_RD_ERR_EN_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH5_RD_ERR_EN_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH6_RD_ERR_EN_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_EN__BSFETCH7_RD_ERR_EN_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC0_WR_ERR_EN_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC1_WR_ERR_EN_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC2_WR_ERR_EN_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC3_WR_ERR_EN_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC4_WR_ERR_EN_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC5_WR_ERR_EN_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC6_WR_ERR_EN_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_EN__DJRBC7_WR_ERR_EN_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF0_WR_ERR_EN_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF1_WR_ERR_EN_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF2_WR_ERR_EN_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF3_WR_ERR_EN_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF4_WR_ERR_EN_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF5_WR_ERR_EN_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF6_WR_ERR_EN_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_EN__OBUF7_WR_ERR_EN_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_EN1
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_RD_ERR_EN_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_EN1__PELFETCH_RD_ERR_EN_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_RD_ERR_EN_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_EN1__EJRBC_WR_ERR_EN_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_EN1__BS_WR_ERR_EN_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_EN1__SCALAR_WR_ERR_EN_MASK 0x00000020L
+//JPEG_MEMCHECK_SYS_INT_STAT
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__BSFETCH7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT__OBUF7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_STAT1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_STAT1__DJRBC7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_STAT2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_STAT2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MEMCHECK_SYS_INT_ACK
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__BSFETCH7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK__OBUF7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_ACK1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_HI_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_HI_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_HI_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_HI_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_LO_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_LO_ERR__SHIFT 0xc
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_LO_ERR__SHIFT 0xd
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_LO_ERR__SHIFT 0xe
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_LO_ERR__SHIFT 0xf
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR__SHIFT 0x10
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_HI_ERR__SHIFT 0x11
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_HI_ERR__SHIFT 0x12
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_HI_ERR__SHIFT 0x13
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_HI_ERR__SHIFT 0x14
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_HI_ERR__SHIFT 0x15
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_HI_ERR__SHIFT 0x16
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_HI_ERR__SHIFT 0x17
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR__SHIFT 0x18
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_LO_ERR__SHIFT 0x19
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_LO_ERR__SHIFT 0x1a
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_LO_ERR__SHIFT 0x1b
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_LO_ERR__SHIFT 0x1c
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_LO_ERR__SHIFT 0x1d
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_LO_ERR__SHIFT 0x1e
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_LO_ERR__SHIFT 0x1f
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_HI_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_HI_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_HI_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_HI_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_RD_LO_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_RD_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_RD_LO_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_RD_LO_ERR_MASK 0x00000800L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_RD_LO_ERR_MASK 0x00001000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_RD_LO_ERR_MASK 0x00002000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_RD_LO_ERR_MASK 0x00004000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_RD_LO_ERR_MASK 0x00008000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_HI_ERR_MASK 0x00010000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_HI_ERR_MASK 0x00020000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_HI_ERR_MASK 0x00040000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_HI_ERR_MASK 0x00080000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_HI_ERR_MASK 0x00100000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_HI_ERR_MASK 0x00200000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_HI_ERR_MASK 0x00400000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_HI_ERR_MASK 0x00800000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC0_WR_LO_ERR_MASK 0x01000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC1_WR_LO_ERR_MASK 0x02000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC2_WR_LO_ERR_MASK 0x04000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC3_WR_LO_ERR_MASK 0x08000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC4_WR_LO_ERR_MASK 0x10000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC5_WR_LO_ERR_MASK 0x20000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC6_WR_LO_ERR_MASK 0x40000000L
+#define JPEG_MEMCHECK_SYS_INT_ACK1__DJRBC7_WR_LO_ERR_MASK 0x80000000L
+//JPEG_MEMCHECK_SYS_INT_ACK2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR__SHIFT 0x0
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR__SHIFT 0x1
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR__SHIFT 0x2
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR__SHIFT 0x3
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR__SHIFT 0x4
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR__SHIFT 0x5
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR__SHIFT 0x6
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR__SHIFT 0x7
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR__SHIFT 0x8
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR__SHIFT 0x9
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR__SHIFT 0xa
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR__SHIFT 0xb
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_HI_ERR_MASK 0x00000001L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_RD_LO_ERR_MASK 0x00000002L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_HI_ERR_MASK 0x00000004L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__PELFETCH_RD_LO_ERR_MASK 0x00000008L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_HI_ERR_MASK 0x00000010L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_RD_LO_ERR_MASK 0x00000020L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_HI_ERR_MASK 0x00000040L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__EJRBC_WR_LO_ERR_MASK 0x00000080L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_HI_ERR_MASK 0x00000100L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__BS_WR_LO_ERR_MASK 0x00000200L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_HI_ERR_MASK 0x00000400L
+#define JPEG_MEMCHECK_SYS_INT_ACK2__SCALAR_WR_LO_ERR_MASK 0x00000800L
+//JPEG_MASTINT_EN
+#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0
+#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4
+#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L
+#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L
+//JPEG_IH_CTRL
+#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0
+#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3
+#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7
+#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13
+#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L
+#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L
+#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L
+#define JPEG_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L
+#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L
+//JRBBM_ARB_CTRL
+#define JRBBM_ARB_CTRL__DJRBC0_DROP__SHIFT 0x0
+#define JRBBM_ARB_CTRL__DJRBC1_DROP__SHIFT 0x1
+#define JRBBM_ARB_CTRL__DJRBC2_DROP__SHIFT 0x2
+#define JRBBM_ARB_CTRL__DJRBC3_DROP__SHIFT 0x3
+#define JRBBM_ARB_CTRL__DJRBC4_DROP__SHIFT 0x4
+#define JRBBM_ARB_CTRL__DJRBC5_DROP__SHIFT 0x5
+#define JRBBM_ARB_CTRL__DJRBC6_DROP__SHIFT 0x6
+#define JRBBM_ARB_CTRL__DJRBC7_DROP__SHIFT 0x7
+#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x8
+#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x9
+#define JRBBM_ARB_CTRL__DJRBC0_DROP_MASK 0x00000001L
+#define JRBBM_ARB_CTRL__DJRBC1_DROP_MASK 0x00000002L
+#define JRBBM_ARB_CTRL__DJRBC2_DROP_MASK 0x00000004L
+#define JRBBM_ARB_CTRL__DJRBC3_DROP_MASK 0x00000008L
+#define JRBBM_ARB_CTRL__DJRBC4_DROP_MASK 0x00000010L
+#define JRBBM_ARB_CTRL__DJRBC5_DROP_MASK 0x00000020L
+#define JRBBM_ARB_CTRL__DJRBC6_DROP_MASK 0x00000040L
+#define JRBBM_ARB_CTRL__DJRBC7_DROP_MASK 0x00000080L
+#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000100L
+#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000200L
+
+
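Every field in these generated headers comes as a paired __SHIFT/_MASK define, and drivers consume them with plain mask-and-shift arithmetic. A minimal sketch of that pattern using the JPEG_IH_CTRL.IH_VMID field defined above (the amdgpu driver wraps the same naming convention in its REG_GET_FIELD()/REG_SET_FIELD() helpers; the function names here are illustrative only):

#include <linux/types.h>

/* Sketch only: helper names are hypothetical, not part of the header. */
static inline u32 jpeg_ih_get_vmid(u32 reg_val)
{
	/* Isolate bits 6:3 of JPEG_IH_CTRL and shift them down to bit 0. */
	return (reg_val & JPEG_IH_CTRL__IH_VMID_MASK) >>
	       JPEG_IH_CTRL__IH_VMID__SHIFT;
}

static inline u32 jpeg_ih_set_vmid(u32 reg_val, u32 vmid)
{
	/* Clear the field, then insert the new value under the same mask. */
	reg_val &= ~JPEG_IH_CTRL__IH_VMID_MASK;
	reg_val |= (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT) &
		   JPEG_IH_CTRL__IH_VMID_MASK;
	return reg_val;
}
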
+// addressBlock: aid_uvd0_uvd_jpeg_common_sclk_dec
+//JPEG_CGC_GATE
+#define JPEG_CGC_GATE__JPEG0_DEC__SHIFT 0x0
+#define JPEG_CGC_GATE__JPEG1_DEC__SHIFT 0x1
+#define JPEG_CGC_GATE__JPEG2_DEC__SHIFT 0x2
+#define JPEG_CGC_GATE__JPEG3_DEC__SHIFT 0x3
+#define JPEG_CGC_GATE__JPEG4_DEC__SHIFT 0x4
+#define JPEG_CGC_GATE__JPEG5_DEC__SHIFT 0x5
+#define JPEG_CGC_GATE__JPEG6_DEC__SHIFT 0x6
+#define JPEG_CGC_GATE__JPEG7_DEC__SHIFT 0x7
+#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x8
+#define JPEG_CGC_GATE__JMCIF__SHIFT 0x9
+#define JPEG_CGC_GATE__JRBBM__SHIFT 0xa
+#define JPEG_CGC_GATE__JPEG0_DEC_MASK 0x00000001L
+#define JPEG_CGC_GATE__JPEG1_DEC_MASK 0x00000002L
+#define JPEG_CGC_GATE__JPEG2_DEC_MASK 0x00000004L
+#define JPEG_CGC_GATE__JPEG3_DEC_MASK 0x00000008L
+#define JPEG_CGC_GATE__JPEG4_DEC_MASK 0x00000010L
+#define JPEG_CGC_GATE__JPEG5_DEC_MASK 0x00000020L
+#define JPEG_CGC_GATE__JPEG6_DEC_MASK 0x00000040L
+#define JPEG_CGC_GATE__JPEG7_DEC_MASK 0x00000080L
+#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000100L
+#define JPEG_CGC_GATE__JMCIF_MASK 0x00000200L
+#define JPEG_CGC_GATE__JRBBM_MASK 0x00000400L
+//JPEG_CGC_CTRL
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5
+#define JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT 0x10
+#define JPEG_CGC_CTRL__JPEG1_DEC_MODE__SHIFT 0x11
+#define JPEG_CGC_CTRL__JPEG2_DEC_MODE__SHIFT 0x12
+#define JPEG_CGC_CTRL__JPEG3_DEC_MODE__SHIFT 0x13
+#define JPEG_CGC_CTRL__JPEG4_DEC_MODE__SHIFT 0x14
+#define JPEG_CGC_CTRL__JPEG5_DEC_MODE__SHIFT 0x15
+#define JPEG_CGC_CTRL__JPEG6_DEC_MODE__SHIFT 0x16
+#define JPEG_CGC_CTRL__JPEG7_DEC_MODE__SHIFT 0x17
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x18
+#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x19
+#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x1a
+#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L
+#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL
+#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x00001FE0L
+#define JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK 0x00010000L
+#define JPEG_CGC_CTRL__JPEG1_DEC_MODE_MASK 0x00020000L
+#define JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK 0x00040000L
+#define JPEG_CGC_CTRL__JPEG3_DEC_MODE_MASK 0x00080000L
+#define JPEG_CGC_CTRL__JPEG4_DEC_MODE_MASK 0x00100000L
+#define JPEG_CGC_CTRL__JPEG5_DEC_MODE_MASK 0x00200000L
+#define JPEG_CGC_CTRL__JPEG6_DEC_MODE_MASK 0x00400000L
+#define JPEG_CGC_CTRL__JPEG7_DEC_MODE_MASK 0x00800000L
+#define JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x01000000L
+#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x02000000L
+#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x04000000L
+//JPEG_CGC_STATUS
+#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE__SHIFT 0x0
+#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE__SHIFT 0x1
+#define JPEG_CGC_STATUS__JPEG1_DEC_VCLK_ACTIVE__SHIFT 0x2
+#define JPEG_CGC_STATUS__JPEG1_DEC_SCLK_ACTIVE__SHIFT 0x3
+#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE__SHIFT 0x4
+#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE__SHIFT 0x5
+#define JPEG_CGC_STATUS__JPEG3_DEC_VCLK_ACTIVE__SHIFT 0x6
+#define JPEG_CGC_STATUS__JPEG3_DEC_SCLK_ACTIVE__SHIFT 0x7
+#define JPEG_CGC_STATUS__JPEG4_DEC_VCLK_ACTIVE__SHIFT 0x8
+#define JPEG_CGC_STATUS__JPEG4_DEC_SCLK_ACTIVE__SHIFT 0x9
+#define JPEG_CGC_STATUS__JPEG5_DEC_VCLK_ACTIVE__SHIFT 0xa
+#define JPEG_CGC_STATUS__JPEG5_DEC_SCLK_ACTIVE__SHIFT 0xb
+#define JPEG_CGC_STATUS__JPEG6_DEC_VCLK_ACTIVE__SHIFT 0xc
+#define JPEG_CGC_STATUS__JPEG6_DEC_SCLK_ACTIVE__SHIFT 0xd
+#define JPEG_CGC_STATUS__JPEG7_DEC_VCLK_ACTIVE__SHIFT 0xe
+#define JPEG_CGC_STATUS__JPEG7_DEC_SCLK_ACTIVE__SHIFT 0xf
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x10
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x11
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x12
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x13
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x14
+#define JPEG_CGC_STATUS__JPEG0_DEC_VCLK_ACTIVE_MASK 0x00000001L
+#define JPEG_CGC_STATUS__JPEG0_DEC_SCLK_ACTIVE_MASK 0x00000002L
+#define JPEG_CGC_STATUS__JPEG1_DEC_VCLK_ACTIVE_MASK 0x00000004L
+#define JPEG_CGC_STATUS__JPEG1_DEC_SCLK_ACTIVE_MASK 0x00000008L
+#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE_MASK 0x00000010L
+#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE_MASK 0x00000020L
+#define JPEG_CGC_STATUS__JPEG3_DEC_VCLK_ACTIVE_MASK 0x00000040L
+#define JPEG_CGC_STATUS__JPEG3_DEC_SCLK_ACTIVE_MASK 0x00000080L
+#define JPEG_CGC_STATUS__JPEG4_DEC_VCLK_ACTIVE_MASK 0x00000100L
+#define JPEG_CGC_STATUS__JPEG4_DEC_SCLK_ACTIVE_MASK 0x00000200L
+#define JPEG_CGC_STATUS__JPEG5_DEC_VCLK_ACTIVE_MASK 0x00000400L
+#define JPEG_CGC_STATUS__JPEG5_DEC_SCLK_ACTIVE_MASK 0x00000800L
+#define JPEG_CGC_STATUS__JPEG6_DEC_VCLK_ACTIVE_MASK 0x00001000L
+#define JPEG_CGC_STATUS__JPEG6_DEC_SCLK_ACTIVE_MASK 0x00002000L
+#define JPEG_CGC_STATUS__JPEG7_DEC_VCLK_ACTIVE_MASK 0x00004000L
+#define JPEG_CGC_STATUS__JPEG7_DEC_SCLK_ACTIVE_MASK 0x00008000L
+#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00010000L
+#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00020000L
+#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00040000L
+#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00080000L
+#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00100000L
+//JPEG_COMN_CGC_MEM_CTRL
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN__SHIFT 0x3
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L
+#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_SW_EN_MASK 0x00000008L
+//JPEG_DEC_CGC_MEM_CTRL
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN__SHIFT 0x0
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN__SHIFT 0x1
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN__SHIFT 0x2
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN__SHIFT 0x3
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_EN__SHIFT 0x4
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_DS_EN__SHIFT 0x5
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_SD_EN__SHIFT 0x6
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_SW_EN__SHIFT 0x7
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN__SHIFT 0x8
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN__SHIFT 0x9
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN__SHIFT 0xa
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_SW_EN__SHIFT 0xb
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_EN__SHIFT 0xc
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_DS_EN__SHIFT 0xd
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_SD_EN__SHIFT 0xe
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_SW_EN__SHIFT 0xf
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_EN__SHIFT 0x10
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_DS_EN__SHIFT 0x11
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_SD_EN__SHIFT 0x12
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_SW_EN__SHIFT 0x13
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_EN__SHIFT 0x14
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_DS_EN__SHIFT 0x15
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_SD_EN__SHIFT 0x16
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_SW_EN__SHIFT 0x17
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_EN__SHIFT 0x18
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_DS_EN__SHIFT 0x19
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_SD_EN__SHIFT 0x1a
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_SW_EN__SHIFT 0x1b
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_EN__SHIFT 0x1c
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_DS_EN__SHIFT 0x1d
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_SD_EN__SHIFT 0x1e
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_SW_EN__SHIFT 0x1f
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_EN_MASK 0x00000001L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_DS_EN_MASK 0x00000002L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_SD_EN_MASK 0x00000004L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG0_DEC_LS_SW_EN_MASK 0x00000008L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_EN_MASK 0x00000010L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_DS_EN_MASK 0x00000020L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_SD_EN_MASK 0x00000040L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG1_DEC_LS_SW_EN_MASK 0x00000080L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN_MASK 0x00000100L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN_MASK 0x00000200L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN_MASK 0x00000400L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_SW_EN_MASK 0x00000800L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_EN_MASK 0x00001000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_DS_EN_MASK 0x00002000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_SD_EN_MASK 0x00004000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG3_DEC_LS_SW_EN_MASK 0x00008000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_EN_MASK 0x00010000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_DS_EN_MASK 0x00020000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_SD_EN_MASK 0x00040000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG4_DEC_LS_SW_EN_MASK 0x00080000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_EN_MASK 0x00100000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_DS_EN_MASK 0x00200000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_SD_EN_MASK 0x00400000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG5_DEC_LS_SW_EN_MASK 0x00800000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_EN_MASK 0x01000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_DS_EN_MASK 0x02000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_SD_EN_MASK 0x04000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG6_DEC_LS_SW_EN_MASK 0x08000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_EN_MASK 0x10000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_DS_EN_MASK 0x20000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_SD_EN_MASK 0x40000000L
+#define JPEG_DEC_CGC_MEM_CTRL__JPEG7_DEC_LS_SW_EN_MASK 0x80000000L
+//JPEG_ENC_CGC_MEM_CTRL
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN__SHIFT 0x3
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L
+#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_SW_EN_MASK 0x00000008L
+//JPEG_PERF_BANK_CONF
+#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0
+#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8
+#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10
+#define JPEG_PERF_BANK_CONF__CORE_SEL__SHIFT 0x15
+#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL
+#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L
+#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L
+#define JPEG_PERF_BANK_CONF__CORE_SEL_MASK 0x00E00000L
+//JPEG_PERF_BANK_EVENT_SEL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18
+#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL
+#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L
+#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L
+//JPEG_PERF_BANK_COUNT0
+#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT1
+#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT2
+#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL
+//JPEG_PERF_BANK_COUNT3
+#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0
+#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL
+
+
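The JPEG_CGC_CTRL fields above select the clock-gating mode and the gate/off delays for each JPEG core. A sketch of composing such a value; the delay values are placeholders, not tuned settings, and the real enable sequence lives in the VCN/JPEG IP code rather than here:

#include <linux/types.h>

/* Sketch only: delay values below are illustrative. */
static u32 jpeg_cgc_ctrl_dynamic(u32 cgc_ctrl)
{
	/* Request dynamic (hardware-controlled) clock gating. */
	cgc_ctrl |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	/* Program the gate delay timer (4-bit field at bits 4:1). */
	cgc_ctrl &= ~JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK;
	cgc_ctrl |= (1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) &
		    JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK;

	/* Program the clock-off delay (8-bit field at bits 12:5). */
	cgc_ctrl &= ~JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK;
	cgc_ctrl |= (4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT) &
		    JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK;

	return cgc_ctrl;
}
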
+// addressBlock: aid_uvd0_uvd_pg_dec
+//UVD_PGFSM_CONFIG
+#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 0x0
+#define UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT 0x2
+#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 0x4
+#define UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT 0x6
+#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 0x8
+#define UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT 0xa
+#define UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT 0xc
+#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 0xe
+#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 0x10
+#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 0x12
+#define UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT 0x14
+#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT 0x16
+#define UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT 0x18
+#define UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT 0x1a
+#define UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT 0x1c
+#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG_MASK 0x00000003L
+#define UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG_MASK 0x0000000CL
+#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG_MASK 0x00000030L
+#define UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG_MASK 0x000000C0L
+#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG_MASK 0x00000300L
+#define UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG_MASK 0x00000C00L
+#define UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG_MASK 0x00003000L
+#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG_MASK 0x0000C000L
+#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG_MASK 0x00030000L
+#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG_MASK 0x000C0000L
+#define UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG_MASK 0x00300000L
+#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG_MASK 0x00C00000L
+#define UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG_MASK 0x03000000L
+#define UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG_MASK 0x0C000000L
+#define UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG_MASK 0x30000000L
+//UVD_PGFSM_STATUS
+#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT 0x0
+#define UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT 0x2
+#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT 0x4
+#define UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT 0x6
+#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT 0x8
+#define UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT 0xa
+#define UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT 0xc
+#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT 0xe
+#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT 0x10
+#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT 0x12
+#define UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT 0x14
+#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT 0x16
+#define UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT 0x18
+#define UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT 0x1a
+#define UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT 0x1c
+#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS_MASK 0x00000003L
+#define UVD_PGFSM_STATUS__UVDS_PWR_STATUS_MASK 0x0000000CL
+#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS_MASK 0x00000030L
+#define UVD_PGFSM_STATUS__UVDTC_PWR_STATUS_MASK 0x000000C0L
+#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS_MASK 0x00000300L
+#define UVD_PGFSM_STATUS__UVDTA_PWR_STATUS_MASK 0x00000C00L
+#define UVD_PGFSM_STATUS__UVDLM_PWR_STATUS_MASK 0x00003000L
+#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS_MASK 0x0000C000L
+#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS_MASK 0x00030000L
+#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS_MASK 0x000C0000L
+#define UVD_PGFSM_STATUS__UVDAB_PWR_STATUS_MASK 0x00300000L
+#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK 0x00C00000L
+#define UVD_PGFSM_STATUS__UVDTB_PWR_STATUS_MASK 0x03000000L
+#define UVD_PGFSM_STATUS__UVDNA_PWR_STATUS_MASK 0x0C000000L
+#define UVD_PGFSM_STATUS__UVDNB_PWR_STATUS_MASK 0x30000000L
+//UVD_POWER_STATUS
+#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0
+#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2
+#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4
+#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f
+#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000003L
+#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L
+#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L
+#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L
+#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L
+#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L
+//UVD_JPEG_POWER_STATUS
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f
+#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L
+#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L
+#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L
+#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L
+#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L
+//UVD_MC_DJPEG_RD_SPACE
+#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE__SHIFT 0x0
+#define UVD_MC_DJPEG_RD_SPACE__DJPEG_RD_SPACE_MASK 0x0003FFFFL
+//UVD_MC_DJPEG_WR_SPACE
+#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE__SHIFT 0x0
+#define UVD_MC_DJPEG_WR_SPACE__DJPEG_WR_SPACE_MASK 0x0003FFFFL
+//UVD_MC_EJPEG_RD_SPACE
+#define UVD_MC_EJPEG_RD_SPACE__EJPEG_RD_SPACE__SHIFT 0x0
+#define UVD_MC_EJPEG_RD_SPACE__EJPEG_RD_SPACE_MASK 0x0003FFFFL
+//UVD_MC_EJPEG_WR_SPACE
+#define UVD_MC_EJPEG_WR_SPACE__EJPEG_WR_SPACE__SHIFT 0x0
+#define UVD_MC_EJPEG_WR_SPACE__EJPEG_WR_SPACE_MASK 0x0003FFFFL
+//UVD_PG_IND_INDEX
+#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0
+#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL
+//UVD_PG_IND_DATA
+#define UVD_PG_IND_DATA__DATA__SHIFT 0x0
+#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//CC_UVD_HARVESTING
+#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0
+#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L
+#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
+//UVD_DPG_LMA_CTL
+#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0
+#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2
+#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10
+#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L
+#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L
+#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L
+//UVD_DPG_LMA_DATA
+#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0
+#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL
+//UVD_DPG_LMA_MASK
+#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0
+#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL
+//UVD_DPG_PAUSE
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L
+#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L
+#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L
+//UVD_SCRATCH1
+#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0
+#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH2
+#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0
+#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH3
+#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0
+#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH4
+#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0
+#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH5
+#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0
+#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH6
+#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0
+#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH7
+#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0
+#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH8
+#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0
+#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH9
+#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0
+#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH10
+#define UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0
+#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH11
+#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0
+#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH12
+#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0
+#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH13
+#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0
+#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL
+//UVD_SCRATCH14
+#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0
+#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL
+//UVD_FREE_COUNTER_REG
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0
+#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_DPG_VCPU_CACHE_OFFSET0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0
+#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL
+//UVD_DPG_LMI_VCPU_CACHE_VMID
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0
+#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL
+//UVD_REG_FILTER_EN
+#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN__SHIFT 0x0
+#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV__SHIFT 0x1
+#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN__SHIFT 0x2
+#define UVD_REG_FILTER_EN__JPEG_PRIV_EN__SHIFT 0x3
+#define UVD_REG_FILTER_EN__UVD_REG_FILTER_EN_MASK 0x00000001L
+#define UVD_REG_FILTER_EN__MMSCH_HI_PRIV_MASK 0x00000002L
+#define UVD_REG_FILTER_EN__VIDEO_PRIV_EN_MASK 0x00000004L
+#define UVD_REG_FILTER_EN__JPEG_PRIV_EN_MASK 0x00000008L
+//UVD_SECURITY_REG_VIO_REPORT
+#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO__SHIFT 0x0
+#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO__SHIFT 0x1
+#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO__SHIFT 0x2
+#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO__SHIFT 0x3
+#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO__SHIFT 0x4
+#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO__SHIFT 0x5
+#define UVD_SECURITY_REG_VIO_REPORT__HOST_REG_VIO_MASK 0x00000001L
+#define UVD_SECURITY_REG_VIO_REPORT__VCPU_REG_VIO_MASK 0x00000002L
+#define UVD_SECURITY_REG_VIO_REPORT__VIDEO_REG_VIO_MASK 0x00000004L
+#define UVD_SECURITY_REG_VIO_REPORT__DPG_REG_VIO_MASK 0x00000008L
+#define UVD_SECURITY_REG_VIO_REPORT__JPEG_REG_VIO_MASK 0x00000010L
+#define UVD_SECURITY_REG_VIO_REPORT__JDPG_REG_VIO_MASK 0x00000020L
+//UVD_FW_VERSION
+#define UVD_FW_VERSION__FW_VERSION__SHIFT 0x0
+#define UVD_FW_VERSION__FW_VERSION_MASK 0xFFFFFFFFL
+//UVD_PF_STATUS
+#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0
+#define UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7
+#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8
+#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12
+#define UVD_PF_STATUS__JPEG2_PF_OCCURED__SHIFT 0x13
+#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED__SHIFT 0x14
+#define UVD_PF_STATUS__JPEG2_PF_CLEAR__SHIFT 0x15
+#define UVD_PF_STATUS__ENCODER5_PF_OCCURED__SHIFT 0x16
+#define UVD_PF_STATUS__ENCODER5_PF_CLEAR__SHIFT 0x17
+#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L
+#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L
+#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L
+#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L
+#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L
+#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L
+#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L
+#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L
+#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L
+#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L
+#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L
+#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L
+#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L
+#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L
+#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L
+#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L
+#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L
+#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L
+#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L
+#define UVD_PF_STATUS__JPEG2_PF_OCCURED_MASK 0x00080000L
+#define UVD_PF_STATUS__DJ2_ATM_PF_OCCURED_MASK 0x00100000L
+#define UVD_PF_STATUS__JPEG2_PF_CLEAR_MASK 0x00200000L
+#define UVD_PF_STATUS__ENCODER5_PF_OCCURED_MASK 0x00400000L
+#define UVD_PF_STATUS__ENCODER5_PF_CLEAR_MASK 0x00800000L
+//UVD_DPG_CLK_EN_VCPU_REPORT
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1
+#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L
+#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL
+//CC_UVD_VCPU_ERR_DETECT_BOT_LO
+#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO__SHIFT 0xc
+#define CC_UVD_VCPU_ERR_DETECT_BOT_LO__UVD_VCPU_ERR_DETECT_BOT_LO_MASK 0xFFFFF000L
+//CC_UVD_VCPU_ERR_DETECT_BOT_HI
+#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_DETECT_BOT_HI__UVD_VCPU_ERR_DETECT_BOT_HI_MASK 0x0000FFFFL
+//CC_UVD_VCPU_ERR_DETECT_TOP_LO
+#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO__SHIFT 0xc
+#define CC_UVD_VCPU_ERR_DETECT_TOP_LO__UVD_VCPU_ERR_DETECT_TOP_LO_MASK 0xFFFFF000L
+//CC_UVD_VCPU_ERR_DETECT_TOP_HI
+#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_DETECT_TOP_HI__UVD_VCPU_ERR_DETECT_TOP_HI_MASK 0x0000FFFFL
+//CC_UVD_VCPU_ERR
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS__SHIFT 0x0
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR__SHIFT 0x1
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN__SHIFT 0x2
+#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS__SHIFT 0x3
+#define CC_UVD_VCPU_ERR__RESET_ON_FAULT__SHIFT 0x4
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_STATUS_MASK 0x00000001L
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_CLEAR_MASK 0x00000002L
+#define CC_UVD_VCPU_ERR__UVD_VCPU_ERR_DETECT_EN_MASK 0x00000004L
+#define CC_UVD_VCPU_ERR__UVD_TMZ_DBG_DIS_MASK 0x00000008L
+#define CC_UVD_VCPU_ERR__RESET_ON_FAULT_MASK 0x00000010L
+//CC_UVD_VCPU_ERR_INST_ADDR_LO
+#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_INST_ADDR_LO__UVD_VCPU_ERR_INST_ADDR_LO_MASK 0xFFFFFFFFL
+//CC_UVD_VCPU_ERR_INST_ADDR_HI
+#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI__SHIFT 0x0
+#define CC_UVD_VCPU_ERR_INST_ADDR_HI__UVD_VCPU_ERR_INST_ADDR_HI_MASK 0x0000FFFFL
+//UVD_LMI_MMSCH_NC_SPACE
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE__SHIFT 0x3
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE__SHIFT 0x6
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE__SHIFT 0x9
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE__SHIFT 0xf
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE__SHIFT 0x12
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE__SHIFT 0x15
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC0_SPACE_MASK 0x00000007L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC1_SPACE_MASK 0x00000038L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC2_SPACE_MASK 0x000001C0L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC3_SPACE_MASK 0x00000E00L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC4_SPACE_MASK 0x00007000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC5_SPACE_MASK 0x00038000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC6_SPACE_MASK 0x001C0000L
+#define UVD_LMI_MMSCH_NC_SPACE__MMSCH_NC7_SPACE_MASK 0x00E00000L
+//UVD_LMI_ATOMIC_SPACE
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE__SHIFT 0x0
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE__SHIFT 0x3
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE__SHIFT 0x6
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE__SHIFT 0x9
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER0_SPACE_MASK 0x00000007L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER1_SPACE_MASK 0x00000038L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER2_SPACE_MASK 0x000001C0L
+#define UVD_LMI_ATOMIC_SPACE__ATOMIC_USER3_SPACE_MASK 0x00000E00L
+//UVD_GFX8_ADDR_CONFIG
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//UVD_GFX10_ADDR_CONFIG
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define UVD_GFX10_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define UVD_GFX10_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L
+#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+//UVD_GPCNT2_CNTL
+#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT2_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L
+//UVD_GPCNT2_TARGET_LOWER
+#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_STATUS_LOWER
+#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT2_TARGET_UPPER
+#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT2_STATUS_UPPER
+#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_GPCNT3_CNTL
+#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0
+#define UVD_GPCNT3_CNTL__START__SHIFT 0x1
+#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2
+#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3
+#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa
+#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L
+#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L
+#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L
+#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L
+#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L
+//UVD_GPCNT3_TARGET_LOWER
+#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_STATUS_LOWER
+#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_GPCNT3_TARGET_UPPER
+#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0
+#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL
+//UVD_GPCNT3_STATUS_UPPER
+#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0
+#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL
+//UVD_VCLK_DS_CNTL
+#define UVD_VCLK_DS_CNTL__VCLK_DS_EN__SHIFT 0x0
+#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS__SHIFT 0x4
+#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT__SHIFT 0x10
+#define UVD_VCLK_DS_CNTL__VCLK_DS_EN_MASK 0x00000001L
+#define UVD_VCLK_DS_CNTL__VCLK_DS_STATUS_MASK 0x00000010L
+#define UVD_VCLK_DS_CNTL__VCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L
+//UVD_DCLK_DS_CNTL
+#define UVD_DCLK_DS_CNTL__DCLK_DS_EN__SHIFT 0x0
+#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS__SHIFT 0x4
+#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT__SHIFT 0x10
+#define UVD_DCLK_DS_CNTL__DCLK_DS_EN_MASK 0x00000001L
+#define UVD_DCLK_DS_CNTL__DCLK_DS_STATUS_MASK 0x00000010L
+#define UVD_DCLK_DS_CNTL__DCLK_DS_HYSTERESIS_CNT_MASK 0xFFFF0000L
+//UVD_TSC_LOWER
+#define UVD_TSC_LOWER__COUNT__SHIFT 0x0
+#define UVD_TSC_LOWER__COUNT_MASK 0xFFFFFFFFL
+//UVD_TSC_UPPER
+#define UVD_TSC_UPPER__COUNT__SHIFT 0x0
+#define UVD_TSC_UPPER__COUNT_MASK 0x00FFFFFFL
+//VCN_FEATURES
+#define VCN_FEATURES__HAS_VIDEO_DEC__SHIFT 0x0
+#define VCN_FEATURES__HAS_VIDEO_ENC__SHIFT 0x1
+#define VCN_FEATURES__HAS_MJPEG_DEC__SHIFT 0x2
+#define VCN_FEATURES__HAS_MJPEG_ENC__SHIFT 0x3
+#define VCN_FEATURES__HAS_VIDEO_VIRT__SHIFT 0x4
+#define VCN_FEATURES__HAS_H264_LEGACY_DEC__SHIFT 0x5
+#define VCN_FEATURES__HAS_UDEC_DEC__SHIFT 0x6
+#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7
+#define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8
+#define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9
+#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa
+#define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb
+#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc
+#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd
+#define VCN_FEATURES__HAS_AV1_ENC__SHIFT 0xe
+#define VCN_FEATURES__INSTANCE_ID__SHIFT 0x1c
+#define VCN_FEATURES__HAS_VIDEO_DEC_MASK 0x00000001L
+#define VCN_FEATURES__HAS_VIDEO_ENC_MASK 0x00000002L
+#define VCN_FEATURES__HAS_MJPEG_DEC_MASK 0x00000004L
+#define VCN_FEATURES__HAS_MJPEG_ENC_MASK 0x00000008L
+#define VCN_FEATURES__HAS_VIDEO_VIRT_MASK 0x00000010L
+#define VCN_FEATURES__HAS_H264_LEGACY_DEC_MASK 0x00000020L
+#define VCN_FEATURES__HAS_UDEC_DEC_MASK 0x00000040L
+#define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L
+#define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L
+#define VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L
+#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L
+#define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L
+#define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L
+#define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L
+#define VCN_FEATURES__HAS_AV1_ENC_MASK 0x00004000L
+#define VCN_FEATURES__INSTANCE_ID_MASK 0xF0000000L
+//UVD_GPUIOV_STATUS
+#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
+#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
+//UVD_RAS_VCPU_VCODEC_STATUS
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_MMSCH_FATAL_ERROR
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_JPEG0_STATUS
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_JPEG1_STATUS
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_CNTL_PMI_ARB
+#define UVD_RAS_CNTL_PMI_ARB__STAT_VCPU_VCODEC__SHIFT 0x0
+#define UVD_RAS_CNTL_PMI_ARB__ACK_VCPU_VCODEC__SHIFT 0x1
+#define UVD_RAS_CNTL_PMI_ARB__STAT_MMSCH__SHIFT 0x2
+#define UVD_RAS_CNTL_PMI_ARB__ACK_MMSCH__SHIFT 0x3
+#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG0__SHIFT 0x4
+#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG0__SHIFT 0x5
+#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG1__SHIFT 0x6
+#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG1__SHIFT 0x7
+#define UVD_RAS_CNTL_PMI_ARB__STAT_VCPU_VCODEC_MASK 0x00000001L
+#define UVD_RAS_CNTL_PMI_ARB__ACK_VCPU_VCODEC_MASK 0x00000002L
+#define UVD_RAS_CNTL_PMI_ARB__STAT_MMSCH_MASK 0x00000004L
+#define UVD_RAS_CNTL_PMI_ARB__ACK_MMSCH_MASK 0x00000008L
+#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG0_MASK 0x00000010L
+#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG0_MASK 0x00000020L
+#define UVD_RAS_CNTL_PMI_ARB__STAT_JPEG1_MASK 0x00000040L
+#define UVD_RAS_CNTL_PMI_ARB__ACK_JPEG1_MASK 0x00000080L
+//UVD_SCRATCH15
+#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
+#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
+//VCN_JPEG_DB_CTRL1
+#define VCN_JPEG_DB_CTRL1__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL1__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL1__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL1__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL1__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL1__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL2
+#define VCN_JPEG_DB_CTRL2__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL2__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL2__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL2__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL2__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL2__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL3
+#define VCN_JPEG_DB_CTRL3__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL3__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL3__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL3__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL3__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL3__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL4
+#define VCN_JPEG_DB_CTRL4__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL4__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL4__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL4__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL4__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL4__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL5
+#define VCN_JPEG_DB_CTRL5__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL5__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL5__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL5__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL5__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL5__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL6
+#define VCN_JPEG_DB_CTRL6__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL6__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL6__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL6__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL6__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL6__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL7
+#define VCN_JPEG_DB_CTRL7__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL7__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL7__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL7__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL7__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL7__HIT_MASK 0x80000000L
+//UVD_SCRATCH32
+#define UVD_SCRATCH32__SCRATCH32_DATA__SHIFT 0x0
+#define UVD_SCRATCH32__SCRATCH32_DATA_MASK 0xFFFFFFFFL
+//UVD_VERSION
+#define UVD_VERSION__VARIANT_TYPE__SHIFT 0x0
+#define UVD_VERSION__MINOR_VERSION__SHIFT 0x8
+#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10
+#define UVD_VERSION__INSTANCE_ID__SHIFT 0x1c
+#define UVD_VERSION__VARIANT_TYPE_MASK 0x000000FFL
+#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L
+#define UVD_VERSION__INSTANCE_ID_MASK 0xF0000000L
+//VCN_RB_DB_CTRL
+#define VCN_RB_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_JPEG_DB_CTRL
+#define VCN_JPEG_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_JPEG_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_JPEG_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_JPEG_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_JPEG_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_JPEG_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB1_DB_CTRL
+#define VCN_RB1_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB1_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB1_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB1_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB1_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB1_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB2_DB_CTRL
+#define VCN_RB2_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB2_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB2_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB2_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB2_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB2_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB3_DB_CTRL
+#define VCN_RB3_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB3_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB3_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB3_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB3_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB3_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB4_DB_CTRL
+#define VCN_RB4_DB_CTRL__OFFSET__SHIFT 0x2
+#define VCN_RB4_DB_CTRL__EN__SHIFT 0x1e
+#define VCN_RB4_DB_CTRL__HIT__SHIFT 0x1f
+#define VCN_RB4_DB_CTRL__OFFSET_MASK 0x0FFFFFFCL
+#define VCN_RB4_DB_CTRL__EN_MASK 0x40000000L
+#define VCN_RB4_DB_CTRL__HIT_MASK 0x80000000L
+//VCN_RB_ENABLE
+#define VCN_RB_ENABLE__RB_EN__SHIFT 0x0
+#define VCN_RB_ENABLE__JPEG_RB_EN__SHIFT 0x1
+#define VCN_RB_ENABLE__RB1_EN__SHIFT 0x2
+#define VCN_RB_ENABLE__RB2_EN__SHIFT 0x3
+#define VCN_RB_ENABLE__RB3_EN__SHIFT 0x4
+#define VCN_RB_ENABLE__RB4_EN__SHIFT 0x5
+#define VCN_RB_ENABLE__UMSCH_RB_EN__SHIFT 0x6
+#define VCN_RB_ENABLE__EJPEG_RB_EN__SHIFT 0x7
+#define VCN_RB_ENABLE__AUDIO_RB_EN__SHIFT 0x8
+#define VCN_RB_ENABLE__RB_EN_MASK 0x00000001L
+#define VCN_RB_ENABLE__JPEG_RB_EN_MASK 0x00000002L
+#define VCN_RB_ENABLE__RB1_EN_MASK 0x00000004L
+#define VCN_RB_ENABLE__RB2_EN_MASK 0x00000008L
+#define VCN_RB_ENABLE__RB3_EN_MASK 0x00000010L
+#define VCN_RB_ENABLE__RB4_EN_MASK 0x00000020L
+#define VCN_RB_ENABLE__UMSCH_RB_EN_MASK 0x00000040L
+#define VCN_RB_ENABLE__EJPEG_RB_EN_MASK 0x00000080L
+#define VCN_RB_ENABLE__AUDIO_RB_EN_MASK 0x00000100L
+//VCN_RB_WPTR_CTRL
+#define VCN_RB_WPTR_CTRL__RB_CS_EN__SHIFT 0x0
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN__SHIFT 0x1
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN__SHIFT 0x2
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN__SHIFT 0x3
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN__SHIFT 0x4
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN__SHIFT 0x5
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN__SHIFT 0x6
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN__SHIFT 0x7
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN__SHIFT 0x8
+#define VCN_RB_WPTR_CTRL__RB_CS_EN_MASK 0x00000001L
+#define VCN_RB_WPTR_CTRL__JPEG_CS_EN_MASK 0x00000002L
+#define VCN_RB_WPTR_CTRL__RB1_CS_EN_MASK 0x00000004L
+#define VCN_RB_WPTR_CTRL__RB2_CS_EN_MASK 0x00000008L
+#define VCN_RB_WPTR_CTRL__RB3_CS_EN_MASK 0x00000010L
+#define VCN_RB_WPTR_CTRL__RB4_CS_EN_MASK 0x00000020L
+#define VCN_RB_WPTR_CTRL__UMSCH_RB_CS_EN_MASK 0x00000040L
+#define VCN_RB_WPTR_CTRL__EJPEG_RB_CS_EN_MASK 0x00000080L
+#define VCN_RB_WPTR_CTRL__AUDIO_RB_CS_EN_MASK 0x00000100L
+//UVD_RB_RPTR
+#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR
+#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR2
+#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR2
+#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR3
+#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR3
+#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RB_RPTR4
+#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4
+#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RB_WPTR4
+#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4
+#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_RPTR
+#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_OUT_RB_WPTR
+#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_RPTR
+#define UVD_AUDIO_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_AUDIO_RB_WPTR
+#define UVD_AUDIO_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_AUDIO_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_RPTR
+#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_RBC_RB_WPTR
+#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_DPG_LMA_CTL2
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL__SHIFT 0x0
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN__SHIFT 0x1
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR__SHIFT 0x2
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR__SHIFT 0x9
+#define UVD_DPG_LMA_CTL2__DIRECT_ACCESS_SRAM_SEL_MASK 0x00000001L
+#define UVD_DPG_LMA_CTL2__FIFO_DIRECT_ACCESS_EN_MASK 0x00000002L
+#define UVD_DPG_LMA_CTL2__VID_WRITE_PTR_MASK 0x000001FCL
+#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+
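The VCN_RB*_DB_CTRL and VCN_JPEG_DB_CTRL* registers above share one layout: a doorbell OFFSET in bits 27:2, an EN bit at 30, and a HIT status bit at 31. A sketch of building a control value from a doorbell index; db_index is a hypothetical caller-supplied parameter, not a name from this header:

#include <linux/types.h>

/* Sketch only: db_index is a hypothetical doorbell index. */
static u32 vcn_rb_db_ctrl_value(u32 db_index)
{
	u32 val = 0;

	/* Place the index in the OFFSET field (bits 27:2). */
	val |= (db_index << VCN_RB_DB_CTRL__OFFSET__SHIFT) &
	       VCN_RB_DB_CTRL__OFFSET_MASK;

	/* EN (bit 30) arms the doorbell; HIT (bit 31) is read-only status. */
	val |= VCN_RB_DB_CTRL__EN_MASK;

	return val;
}
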
+
+// addressBlock: aid_uvd0_mmsch_dec
+//MMSCH_UCODE_ADDR
+#define MMSCH_UCODE_ADDR__UCODE_ADDR__SHIFT 0x2
+#define MMSCH_UCODE_ADDR__UCODE_LOCK__SHIFT 0x1f
+#define MMSCH_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFCL
+#define MMSCH_UCODE_ADDR__UCODE_LOCK_MASK 0x80000000L
+//MMSCH_UCODE_DATA
+#define MMSCH_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define MMSCH_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//MMSCH_SRAM_ADDR
+#define MMSCH_SRAM_ADDR__SRAM_ADDR__SHIFT 0x2
+#define MMSCH_SRAM_ADDR__SRAM_LOCK__SHIFT 0x1f
+#define MMSCH_SRAM_ADDR__SRAM_ADDR_MASK 0x00001FFCL
+#define MMSCH_SRAM_ADDR__SRAM_LOCK_MASK 0x80000000L
+//MMSCH_SRAM_DATA
+#define MMSCH_SRAM_DATA__SRAM_DATA__SHIFT 0x0
+#define MMSCH_SRAM_DATA__SRAM_DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_SRAM_OFFSET
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET__SHIFT 0x2
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF__SHIFT 0x10
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET_MASK 0x00001FFCL
+#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF_MASK 0x00FF0000L
+//MMSCH_DB_SRAM_OFFSET
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET__SHIFT 0x2
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG__SHIFT 0x10
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG__SHIFT 0x18
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET_MASK 0x00001FFCL
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG_MASK 0x00FF0000L
+#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG_MASK 0xFF000000L
+//MMSCH_CTX_SRAM_OFFSET
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET__SHIFT 0x2
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE__SHIFT 0x10
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET_MASK 0x00001FFCL
+#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE_MASK 0xFFFF0000L
+//MMSCH_CTL
+#define MMSCH_CTL__P_RUNSTALL__SHIFT 0x0
+#define MMSCH_CTL__P_RESET__SHIFT 0x1
+#define MMSCH_CTL__VFID_FIFO_EN__SHIFT 0x4
+#define MMSCH_CTL__P_LOCK__SHIFT 0x1f
+#define MMSCH_CTL__P_RUNSTALL_MASK 0x00000001L
+#define MMSCH_CTL__P_RESET_MASK 0x00000002L
+#define MMSCH_CTL__VFID_FIFO_EN_MASK 0x00000010L
+#define MMSCH_CTL__P_LOCK_MASK 0x80000000L
+//MMSCH_INTR
+#define MMSCH_INTR__INTR__SHIFT 0x0
+#define MMSCH_INTR__INTR_MASK 0x00001FFFL
+//MMSCH_INTR_ACK
+#define MMSCH_INTR_ACK__INTR__SHIFT 0x0
+#define MMSCH_INTR_ACK__INTR_MASK 0x00001FFFL
+//MMSCH_INTR_STATUS
+#define MMSCH_INTR_STATUS__INTR__SHIFT 0x0
+#define MMSCH_INTR_STATUS__INTR_MASK 0x00001FFFL
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_GPCOM_ADDR_LO
+#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_GPCOM_ADDR_HI
+#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_GPCOM_SIZE
+#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE__SHIFT 0x0
+#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_0
+#define MMSCH_VF_MAILBOX_0__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_0_RESP
+#define MMSCH_VF_MAILBOX_0_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_0_RESP__RESP_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_1
+#define MMSCH_VF_MAILBOX_1__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_1_RESP
+#define MMSCH_VF_MAILBOX_1_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_1_RESP__RESP_MASK 0xFFFFFFFFL
+//MMSCH_CNTL
+#define MMSCH_CNTL__CLK_EN__SHIFT 0x0
+#define MMSCH_CNTL__ED_ENABLE__SHIFT 0x1
+#define MMSCH_CNTL__AXI_MAX_BRST_SIZE_IS_4__SHIFT 0x2
+#define MMSCH_CNTL__AXI_40BIT_PIF_ADDR_FIX_EN__SHIFT 0x3
+#define MMSCH_CNTL__PDEBUG_ENABLE__SHIFT 0x4
+#define MMSCH_CNTL__MMSCH_IRQ_ERR__SHIFT 0x5
+#define MMSCH_CNTL__MMSCH_NACK_INTR_EN__SHIFT 0x9
+#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN__SHIFT 0xa
+#define MMSCH_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14
+#define MMSCH_CNTL__TIMEOUT_DIS__SHIFT 0x1c
+#define MMSCH_CNTL__MMSCH_IDLE__SHIFT 0x1d
+#define MMSCH_CNTL__CLK_EN_MASK 0x00000001L
+#define MMSCH_CNTL__ED_ENABLE_MASK 0x00000002L
+#define MMSCH_CNTL__AXI_MAX_BRST_SIZE_IS_4_MASK 0x00000004L
+#define MMSCH_CNTL__AXI_40BIT_PIF_ADDR_FIX_EN_MASK 0x00000008L
+#define MMSCH_CNTL__PDEBUG_ENABLE_MASK 0x00000010L
+#define MMSCH_CNTL__MMSCH_IRQ_ERR_MASK 0x000001E0L
+#define MMSCH_CNTL__MMSCH_NACK_INTR_EN_MASK 0x00000200L
+#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN_MASK 0x00000400L
+#define MMSCH_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L
+#define MMSCH_CNTL__TIMEOUT_DIS_MASK 0x10000000L
+#define MMSCH_CNTL__MMSCH_IDLE_MASK 0x20000000L
+//MMSCH_NONCACHE_OFFSET0
+#define MMSCH_NONCACHE_OFFSET0__OFFSET__SHIFT 0x0
+#define MMSCH_NONCACHE_OFFSET0__OFFSET_MASK 0x0FFFFFFFL
+//MMSCH_NONCACHE_SIZE0
+#define MMSCH_NONCACHE_SIZE0__SIZE__SHIFT 0x0
+#define MMSCH_NONCACHE_SIZE0__SIZE_MASK 0x00FFFFFFL
+//MMSCH_NONCACHE_OFFSET1
+#define MMSCH_NONCACHE_OFFSET1__OFFSET__SHIFT 0x0
+#define MMSCH_NONCACHE_OFFSET1__OFFSET_MASK 0x0FFFFFFFL
+//MMSCH_NONCACHE_SIZE1
+#define MMSCH_NONCACHE_SIZE1__SIZE__SHIFT 0x0
+#define MMSCH_NONCACHE_SIZE1__SIZE_MASK 0x00FFFFFFL
+//MMSCH_PROC_STATE1
+#define MMSCH_PROC_STATE1__PC__SHIFT 0x0
+#define MMSCH_PROC_STATE1__PC_MASK 0xFFFFFFFFL
+//MMSCH_LAST_MC_ADDR
+#define MMSCH_LAST_MC_ADDR__MC_ADDR__SHIFT 0x0
+#define MMSCH_LAST_MC_ADDR__RW__SHIFT 0x1f
+#define MMSCH_LAST_MC_ADDR__MC_ADDR_MASK 0x0FFFFFFFL
+#define MMSCH_LAST_MC_ADDR__RW_MASK 0x80000000L
+//MMSCH_LAST_MEM_ACCESS_HI
+#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD__SHIFT 0x0
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR__SHIFT 0x8
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR__SHIFT 0xc
+#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD_MASK 0x00000007L
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR_MASK 0x00000700L
+#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR_MASK 0x00007000L
+//MMSCH_LAST_MEM_ACCESS_LO
+#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR__SHIFT 0x0
+#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR_MASK 0xFFFFFFFFL
+//MMSCH_IOV_ACTIVE_FCN_ID
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000001FL
+#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L
+//MMSCH_SCRATCH_0
+#define MMSCH_SCRATCH_0__SCRATCH_0__SHIFT 0x0
+#define MMSCH_SCRATCH_0__SCRATCH_0_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_1
+#define MMSCH_SCRATCH_1__SCRATCH_1__SHIFT 0x0
+#define MMSCH_SCRATCH_1__SCRATCH_1_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_0
+#define MMSCH_GPUIOV_SCH_BLOCK_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_0__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_0
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_0
+#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_0
+#define MMSCH_GPUIOV_DW6_0__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_0
+#define MMSCH_GPUIOV_DW7_0__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_0
+#define MMSCH_GPUIOV_DW8_0__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_0__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_1
+#define MMSCH_GPUIOV_SCH_BLOCK_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_1__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_1
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_1
+#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_1
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_1
+#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_1
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_1
+#define MMSCH_GPUIOV_DW6_1__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_1
+#define MMSCH_GPUIOV_DW7_1__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_1
+#define MMSCH_GPUIOV_DW8_1__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_1__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_CNTXT
+#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE__SHIFT 0x0
+#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION__SHIFT 0x7
+#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET__SHIFT 0xa
+#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE_MASK 0x0000007FL
+#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION_MASK 0x00000080L
+#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//MMSCH_SCRATCH_2
+#define MMSCH_SCRATCH_2__SCRATCH_2__SHIFT 0x0
+#define MMSCH_SCRATCH_2__SCRATCH_2_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_3
+#define MMSCH_SCRATCH_3__SCRATCH_3__SHIFT 0x0
+#define MMSCH_SCRATCH_3__SCRATCH_3_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_4
+#define MMSCH_SCRATCH_4__SCRATCH_4__SHIFT 0x0
+#define MMSCH_SCRATCH_4__SCRATCH_4_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_5
+#define MMSCH_SCRATCH_5__SCRATCH_5__SHIFT 0x0
+#define MMSCH_SCRATCH_5__SCRATCH_5_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_6
+#define MMSCH_SCRATCH_6__SCRATCH_6__SHIFT 0x0
+#define MMSCH_SCRATCH_6__SCRATCH_6_MASK 0xFFFFFFFFL
+//MMSCH_SCRATCH_7
+#define MMSCH_SCRATCH_7__SCRATCH_7__SHIFT 0x0
+#define MMSCH_SCRATCH_7__SCRATCH_7_MASK 0xFFFFFFFFL
+//MMSCH_VFID_FIFO_HEAD_0
+#define MMSCH_VFID_FIFO_HEAD_0__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_0__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_0
+#define MMSCH_VFID_FIFO_TAIL_0__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_0__TAIL_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_HEAD_1
+#define MMSCH_VFID_FIFO_HEAD_1__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_1__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_1
+#define MMSCH_VFID_FIFO_TAIL_1__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_1__TAIL_MASK 0x0000003FL
+//MMSCH_NACK_STATUS
+#define MMSCH_NACK_STATUS__WR_NACK_STATUS__SHIFT 0x0
+#define MMSCH_NACK_STATUS__RD_NACK_STATUS__SHIFT 0x2
+#define MMSCH_NACK_STATUS__WR_NACK_STATUS_MASK 0x00000003L
+#define MMSCH_NACK_STATUS__RD_NACK_STATUS_MASK 0x0000000CL
+//MMSCH_VF_MAILBOX0_DATA
+#define MMSCH_VF_MAILBOX0_DATA__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX0_DATA__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX1_DATA
+#define MMSCH_VF_MAILBOX1_DATA__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX1_DATA__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_IP_0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_SCH_BLOCK_IP_1
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_1
+#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_CNTXT_IP
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE__SHIFT 0x0
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION__SHIFT 0x7
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE_MASK 0x0000007FL
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION_MASK 0x00000080L
+//MMSCH_GPUIOV_SCH_BLOCK_2
+#define MMSCH_GPUIOV_SCH_BLOCK_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_2__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_2
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_2
+#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_2
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_2
+#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_2
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_2
+#define MMSCH_GPUIOV_DW6_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_2
+#define MMSCH_GPUIOV_DW7_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_2
+#define MMSCH_GPUIOV_DW8_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_IP_2
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_2
+#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS_MASK 0x00000F00L
+//MMSCH_VFID_FIFO_HEAD_2
+#define MMSCH_VFID_FIFO_HEAD_2__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_2__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_2
+#define MMSCH_VFID_FIFO_TAIL_2__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_2__TAIL_MASK 0x0000003FL
+//MMSCH_VM_BUSY_STATUS_0
+#define MMSCH_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_VM_BUSY_STATUS_1
+#define MMSCH_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_VM_BUSY_STATUS_2
+#define MMSCH_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_slmi_adpdec
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_LMI_MMSCH_NC_VMID
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L
+#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L
+//UVD_LMI_MMSCH_CTRL
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1
+#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH__SHIFT 0x2
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc
+#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L
+#define UVD_LMI_MMSCH_CTRL__PRIV_CLIENT_MMSCH_MASK 0x00000004L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L
+#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L
+//UVD_MMSCH_LMI_STATUS
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT__SHIFT 0x0
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT__SHIFT 0x1
+#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN__SHIFT 0x2
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN__SHIFT 0x4
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS__SHIFT 0x8
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE__SHIFT 0xc
+#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN__SHIFT 0xd
+#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN__SHIFT 0xe
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_LEN_INT_MASK 0x00000001L
+#define UVD_MMSCH_LMI_STATUS__LMI_AXI_MMSCH_UNSUPPORTED_ADR_ALIGN_INT_MASK 0x00000002L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_LMI_WRITE_CLEAN_MASK 0x00000004L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_LEN_MASK 0x000000F0L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_ADDR_LSBS_MASK 0x00000700L
+#define UVD_MMSCH_LMI_STATUS__AXI_MMSCH_ERR_AWRITE_MASK 0x00001000L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_RD_CLEAN_MASK 0x00002000L
+#define UVD_MMSCH_LMI_STATUS__MMSCH_WR_CLEAN_MASK 0x00004000L
+//VCN_RAS_CNTL_MMSCH
+#define VCN_RAS_CNTL_MMSCH__MMSCH_FATAL_ERROR_EN__SHIFT 0x1
+#define VCN_RAS_CNTL_MMSCH__MMSCH_PMI_EN__SHIFT 0x5
+#define VCN_RAS_CNTL_MMSCH__MMSCH_REARM__SHIFT 0x9
+#define VCN_RAS_CNTL_MMSCH__MMSCH_READY__SHIFT 0x11
+#define VCN_RAS_CNTL_MMSCH__MMSCH_FATAL_ERROR_EN_MASK 0x00000002L
+#define VCN_RAS_CNTL_MMSCH__MMSCH_PMI_EN_MASK 0x00000020L
+#define VCN_RAS_CNTL_MMSCH__MMSCH_REARM_MASK 0x00000200L
+#define VCN_RAS_CNTL_MMSCH__MMSCH_READY_MASK 0x00020000L
+
+
+// addressBlock: aid_uvd0_uvd_jrbc1_uvd_jrbc_dec
+//UVD_JRBC1_UVD_JRBC_RB_WPTR
+#define UVD_JRBC1_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC1_UVD_JRBC_RB_CNTL
+#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC1_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC1_UVD_JRBC_IB_SIZE
+#define UVD_JRBC1_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC1_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC1_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC1_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC1_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC1_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC1_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC1_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC1_UVD_JRBC_STATUS
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC1_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC1_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC1_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC1_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC1_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC1_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC1_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC1_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC1_UVD_JRBC_RB_RPTR
+#define UVD_JRBC1_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC1_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC1_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC1_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC1_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC1_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC1_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC1_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC1_UVD_JRBC_RB_SIZE
+#define UVD_JRBC1_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC1_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC1_UVD_JRBC_SCRATCH0
+#define UVD_JRBC1_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC1_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jrbc2_uvd_jrbc_dec
+//UVD_JRBC2_UVD_JRBC_RB_WPTR
+#define UVD_JRBC2_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC2_UVD_JRBC_RB_CNTL
+#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC2_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC2_UVD_JRBC_IB_SIZE
+#define UVD_JRBC2_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC2_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC2_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC2_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC2_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC2_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC2_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC2_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC2_UVD_JRBC_STATUS
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC2_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC2_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC2_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC2_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC2_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC2_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC2_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC2_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC2_UVD_JRBC_RB_RPTR
+#define UVD_JRBC2_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC2_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC2_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC2_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC2_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC2_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC2_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC2_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC2_UVD_JRBC_RB_SIZE
+#define UVD_JRBC2_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC2_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC2_UVD_JRBC_SCRATCH0
+#define UVD_JRBC2_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC2_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jrbc3_uvd_jrbc_dec
+//UVD_JRBC3_UVD_JRBC_RB_WPTR
+#define UVD_JRBC3_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC3_UVD_JRBC_RB_CNTL
+#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC3_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC3_UVD_JRBC_IB_SIZE
+#define UVD_JRBC3_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC3_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC3_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC3_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC3_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC3_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC3_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC3_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC3_UVD_JRBC_STATUS
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC3_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC3_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC3_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC3_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC3_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC3_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC3_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC3_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC3_UVD_JRBC_RB_RPTR
+#define UVD_JRBC3_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC3_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC3_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC3_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC3_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC3_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC3_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC3_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC3_UVD_JRBC_RB_SIZE
+#define UVD_JRBC3_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC3_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC3_UVD_JRBC_SCRATCH0
+#define UVD_JRBC3_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC3_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jrbc4_uvd_jrbc_dec
+//UVD_JRBC4_UVD_JRBC_RB_WPTR
+#define UVD_JRBC4_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC4_UVD_JRBC_RB_CNTL
+#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC4_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC4_UVD_JRBC_IB_SIZE
+#define UVD_JRBC4_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC4_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC4_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC4_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC4_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC4_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC4_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC4_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC4_UVD_JRBC_STATUS
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC4_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC4_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC4_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC4_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC4_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC4_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC4_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC4_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC4_UVD_JRBC_RB_RPTR
+#define UVD_JRBC4_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC4_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC4_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC4_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC4_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC4_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC4_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC4_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC4_UVD_JRBC_RB_SIZE
+#define UVD_JRBC4_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC4_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC4_UVD_JRBC_SCRATCH0
+#define UVD_JRBC4_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC4_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jrbc5_uvd_jrbc_dec
+//UVD_JRBC5_UVD_JRBC_RB_WPTR
+#define UVD_JRBC5_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC5_UVD_JRBC_RB_CNTL
+#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC5_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC5_UVD_JRBC_IB_SIZE
+#define UVD_JRBC5_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC5_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC5_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC5_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC5_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC5_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC5_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC5_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC5_UVD_JRBC_STATUS
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC5_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC5_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC5_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC5_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC5_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC5_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC5_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC5_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC5_UVD_JRBC_RB_RPTR
+#define UVD_JRBC5_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC5_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC5_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC5_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC5_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC5_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC5_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC5_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC5_UVD_JRBC_RB_SIZE
+#define UVD_JRBC5_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC5_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC5_UVD_JRBC_SCRATCH0
+#define UVD_JRBC5_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC5_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jrbc6_uvd_jrbc_dec
+//UVD_JRBC6_UVD_JRBC_RB_WPTR
+#define UVD_JRBC6_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC6_UVD_JRBC_RB_CNTL
+#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC6_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC6_UVD_JRBC_IB_SIZE
+#define UVD_JRBC6_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC6_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC6_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC6_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC6_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC6_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC6_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC6_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC6_UVD_JRBC_STATUS
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC6_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC6_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC6_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC6_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC6_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC6_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC6_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC6_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC6_UVD_JRBC_RB_RPTR
+#define UVD_JRBC6_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC6_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC6_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC6_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC6_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC6_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC6_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC6_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC6_UVD_JRBC_RB_SIZE
+#define UVD_JRBC6_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC6_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC6_UVD_JRBC_SCRATCH0
+#define UVD_JRBC6_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC6_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jrbc7_uvd_jrbc_dec
+//UVD_JRBC7_UVD_JRBC_RB_WPTR
+#define UVD_JRBC7_UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L
+//UVD_JRBC7_UVD_JRBC_RB_CNTL
+#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1
+#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L
+#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L
+#define UVD_JRBC7_UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L
+//UVD_JRBC7_UVD_JRBC_IB_SIZE
+#define UVD_JRBC7_UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC7_UVD_JRBC_URGENT_CNTL
+#define UVD_JRBC7_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L
+//UVD_JRBC7_UVD_JRBC_RB_REF_DATA
+#define UVD_JRBC7_UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC7_UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC7_UVD_JRBC_SOFT_RESET
+#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11
+#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L
+#define UVD_JRBC7_UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L
+//UVD_JRBC7_UVD_JRBC_STATUS
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa
+#define UVD_JRBC7_UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc
+#define UVD_JRBC7_UVD_JRBC_STATUS__INT_EN__SHIFT 0x10
+#define UVD_JRBC7_UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L
+#define UVD_JRBC7_UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L
+#define UVD_JRBC7_UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L
+#define UVD_JRBC7_UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L
+#define UVD_JRBC7_UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L
+#define UVD_JRBC7_UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L
+//UVD_JRBC7_UVD_JRBC_RB_RPTR
+#define UVD_JRBC7_UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L
+//UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS
+#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC7_UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS
+#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10
+#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18
+#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL
+#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L
+#define UVD_JRBC7_UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L
+//UVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE
+#define UVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L
+//UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L
+#define UVD_JRBC7_UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L
+//UVD_JRBC7_UVD_JRBC_IB_REF_DATA
+#define UVD_JRBC7_UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL
+//UVD_JRBC7_UVD_JPEG_PREEMPT_CMD
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L
+//UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL
+//UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0
+#define UVD_JRBC7_UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL
+//UVD_JRBC7_UVD_JRBC_RB_SIZE
+#define UVD_JRBC7_UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JRBC7_UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L
+//UVD_JRBC7_UVD_JRBC_SCRATCH0
+#define UVD_JRBC7_UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JRBC7_UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+
+
+// addressBlock: aid_uvd0_uvd_jmi1_uvd_jmi_dec
+//UVD_JMI1_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI1_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI1_UVD_LMI_JRBC_CTRL
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI1_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI1_UVD_LMI_JPEG_CTRL
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI1_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI1_JPEG_LMI_DROP
+#define UVD_JMI1_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI1_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI1_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI1_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI1_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI1_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI1_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI1_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI1_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI1_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI1_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI1_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI1_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI1_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI1_UVD_LMI_JPEG_VMID
+#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI1_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI1_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI1_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI1_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI1_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI1_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI1_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI1_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi2_uvd_jmi_dec
+//UVD_JMI2_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI2_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI2_UVD_LMI_JRBC_CTRL
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI2_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI2_UVD_LMI_JPEG_CTRL
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI2_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI2_JPEG_LMI_DROP
+#define UVD_JMI2_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI2_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI2_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI2_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI2_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI2_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI2_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI2_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI2_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI2_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI2_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI2_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI2_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI2_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI2_UVD_LMI_JPEG_VMID
+#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI2_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI2_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI2_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI2_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI2_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI2_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI2_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI2_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi3_uvd_jmi_dec
+//UVD_JMI3_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI3_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI3_UVD_LMI_JRBC_CTRL
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI3_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI3_UVD_LMI_JPEG_CTRL
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI3_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI3_JPEG_LMI_DROP
+#define UVD_JMI3_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI3_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI3_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI3_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI3_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI3_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI3_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI3_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI3_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI3_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI3_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI3_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI3_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI3_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI3_UVD_LMI_JPEG_VMID
+#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI3_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI3_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI3_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI3_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI3_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI3_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI3_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI3_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi4_uvd_jmi_dec
+//UVD_JMI4_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI4_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI4_UVD_LMI_JRBC_CTRL
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI4_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI4_UVD_LMI_JPEG_CTRL
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI4_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI4_JPEG_LMI_DROP
+#define UVD_JMI4_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI4_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI4_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI4_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI4_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI4_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI4_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI4_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI4_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI4_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI4_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI4_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI4_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI4_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI4_UVD_LMI_JPEG_VMID
+#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI4_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI4_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI4_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI4_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI4_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI4_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI4_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI4_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi5_uvd_jmi_dec
+//UVD_JMI5_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI5_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI5_UVD_LMI_JRBC_CTRL
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI5_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI5_UVD_LMI_JPEG_CTRL
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI5_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI5_JPEG_LMI_DROP
+#define UVD_JMI5_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI5_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI5_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI5_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI5_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI5_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI5_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI5_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI5_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI5_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI5_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI5_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI5_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI5_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI5_UVD_LMI_JPEG_VMID
+#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI5_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI5_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI5_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI5_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI5_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI5_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI5_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI5_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi6_uvd_jmi_dec
+//UVD_JMI6_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI6_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI6_UVD_LMI_JRBC_CTRL
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI6_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI6_UVD_LMI_JPEG_CTRL
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI6_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI6_JPEG_LMI_DROP
+#define UVD_JMI6_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI6_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI6_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI6_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI6_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI6_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI6_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI6_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI6_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI6_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI6_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI6_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI6_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI6_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI6_UVD_LMI_JPEG_VMID
+#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI6_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI6_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI6_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI6_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI6_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI6_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI6_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI6_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: aid_uvd0_uvd_jmi7_uvd_jmi_dec
+//UVD_JMI7_UVD_JPEG_DEC_PF_CTRL
+#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS__SHIFT 0x0
+#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING__SHIFT 0x1
+#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_HANDLING_DIS_MASK 0x00000001L
+#define UVD_JMI7_UVD_JPEG_DEC_PF_CTRL__DEC_PF_SW_GATING_MASK 0x00000002L
+//UVD_JMI7_UVD_LMI_JRBC_CTRL
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI7_UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI7_UVD_LMI_JPEG_CTRL
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L
+#define UVD_JMI7_UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L
+//UVD_JMI7_JPEG_LMI_DROP
+#define UVD_JMI7_JPEG_LMI_DROP__JPEG_WR_DROP__SHIFT 0x0
+#define UVD_JMI7_JPEG_LMI_DROP__JRBC_WR_DROP__SHIFT 0x1
+#define UVD_JMI7_JPEG_LMI_DROP__JPEG_RD_DROP__SHIFT 0x2
+#define UVD_JMI7_JPEG_LMI_DROP__JRBC_RD_DROP__SHIFT 0x3
+#define UVD_JMI7_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP__SHIFT 0x4
+#define UVD_JMI7_JPEG_LMI_DROP__JPEG_WR_DROP_MASK 0x00000001L
+#define UVD_JMI7_JPEG_LMI_DROP__JRBC_WR_DROP_MASK 0x00000002L
+#define UVD_JMI7_JPEG_LMI_DROP__JPEG_RD_DROP_MASK 0x00000004L
+#define UVD_JMI7_JPEG_LMI_DROP__JRBC_RD_DROP_MASK 0x00000008L
+#define UVD_JMI7_JPEG_LMI_DROP__JPEG_ATOMIC_WR_DROP_MASK 0x00000010L
+//UVD_JMI7_UVD_LMI_JRBC_IB_VMID
+#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4
+#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI7_UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI7_UVD_LMI_JRBC_RB_VMID
+#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4
+#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8
+#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL
+#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L
+#define UVD_JMI7_UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L
+//UVD_JMI7_UVD_LMI_JPEG_VMID
+#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4
+#define UVD_JMI7_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8
+#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL
+#define UVD_JMI7_UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L
+#define UVD_JMI7_UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L
+//UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID
+#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL
+//UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L
+#define UVD_JMI7_UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L
+//UVD_JMI7_UVD_JMI_ATOMIC_CNTL
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en__SHIFT 0x0
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_max_burst__SHIFT 0x1
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop__SHIFT 0x5
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en__SHIFT 0x6
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG__SHIFT 0x7
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE__SHIFT 0xb
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_arb_wait_en_MASK 0x00000001L
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_max_burst_MASK 0x0000001EL
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_drop_MASK 0x00000020L
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__atomic_wr_clamping_en_MASK 0x00000040L
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_WR_URG_MASK 0x00000780L
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL__ATOMIC_SW_GATE_MASK 0x00000800L
+//UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_JMI_ATOMIC_USER0_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW
+#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH
+#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0
+#define UVD_JMI7_UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL
+//UVD_JMI7_UVD_JMI_ATOMIC_CNTL2
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap__SHIFT 0x10
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x18
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__atomic_uvd_swap_MASK 0x00FF0000L
+#define UVD_JMI7_UVD_JMI_ATOMIC_CNTL2__ATOMIC_MC_SWAP_MASK 0xFF000000L
+
+
+// addressBlock: uvdctxind
+//UVD_CGC_MEM_CTRL
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN__SHIFT 0x0
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN__SHIFT 0x1
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN__SHIFT 0x2
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN__SHIFT 0x3
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN__SHIFT 0x4
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN__SHIFT 0x5
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN__SHIFT 0x6
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN__SHIFT 0x7
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN__SHIFT 0x8
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN__SHIFT 0x9
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN__SHIFT 0xa
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN__SHIFT 0xc
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN__SHIFT 0xd
+#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN__SHIFT 0xe
+#define UVD_CGC_MEM_CTRL__MPC1_LS_EN__SHIFT 0xf
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14
+#define UVD_CGC_MEM_CTRL__LMI_MC_LS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_CTRL__MPC_LS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_CTRL__MPRD_LS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_CTRL__WCB_LS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_CTRL__UDEC_RE_LS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_CTRL__UDEC_CM_LS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_CTRL__UDEC_IT_LS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_CTRL__UDEC_DB_LS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_CTRL__UDEC_MP_LS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_CTRL__SYS_LS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_CTRL__VCPU_LS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_CTRL__MIF_LS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_CTRL__LCM_LS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_CTRL__MMSCH_LS_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_CTRL__MPC1_LS_EN_MASK 0x00008000L
+#define UVD_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L
+#define UVD_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L
+//UVD_CGC_CTRL2
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN__SHIFT 0x0
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN__SHIFT 0x1
+#define UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT 0x2
+#define UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK 0x00000001L
+#define UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK 0x00000002L
+#define UVD_CGC_CTRL2__GATER_DIV_ID_MASK 0x0000001CL
+//UVD_CGC_MEM_DS_CTRL
+#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN__SHIFT 0x0
+#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN__SHIFT 0x1
+#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN__SHIFT 0x2
+#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN__SHIFT 0x3
+#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN__SHIFT 0x4
+#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN__SHIFT 0x5
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN__SHIFT 0x6
+#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN__SHIFT 0x7
+#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN__SHIFT 0x8
+#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN__SHIFT 0x9
+#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN__SHIFT 0xa
+#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN__SHIFT 0xc
+#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN__SHIFT 0xd
+#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN__SHIFT 0xe
+#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN__SHIFT 0xf
+#define UVD_CGC_MEM_DS_CTRL__LMI_MC_DS_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_DS_CTRL__MPC_DS_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_DS_CTRL__MPRD_DS_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_DS_CTRL__WCB_DS_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_RE_DS_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_CM_DS_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_IT_DS_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_DB_DS_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_DS_CTRL__UDEC_MP_DS_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_DS_CTRL__SYS_DS_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_DS_CTRL__VCPU_DS_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_DS_CTRL__MIF_DS_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_DS_CTRL__LCM_DS_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_DS_CTRL__MMSCH_DS_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_DS_CTRL__MPC1_DS_EN_MASK 0x00008000L
+//UVD_CGC_MEM_SD_CTRL
+#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN__SHIFT 0x0
+#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN__SHIFT 0x1
+#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN__SHIFT 0x2
+#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN__SHIFT 0x3
+#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN__SHIFT 0x4
+#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN__SHIFT 0x5
+#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN__SHIFT 0x6
+#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN__SHIFT 0x7
+#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN__SHIFT 0x8
+#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN__SHIFT 0x9
+#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN__SHIFT 0xa
+#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN__SHIFT 0xc
+#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN__SHIFT 0xd
+#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN__SHIFT 0xe
+#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN__SHIFT 0xf
+#define UVD_CGC_MEM_SD_CTRL__LMI_MC_SD_EN_MASK 0x00000001L
+#define UVD_CGC_MEM_SD_CTRL__MPC_SD_EN_MASK 0x00000002L
+#define UVD_CGC_MEM_SD_CTRL__MPRD_SD_EN_MASK 0x00000004L
+#define UVD_CGC_MEM_SD_CTRL__WCB_SD_EN_MASK 0x00000008L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_RE_SD_EN_MASK 0x00000010L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_CM_SD_EN_MASK 0x00000020L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_IT_SD_EN_MASK 0x00000040L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_DB_SD_EN_MASK 0x00000080L
+#define UVD_CGC_MEM_SD_CTRL__UDEC_MP_SD_EN_MASK 0x00000100L
+#define UVD_CGC_MEM_SD_CTRL__SYS_SD_EN_MASK 0x00000200L
+#define UVD_CGC_MEM_SD_CTRL__VCPU_SD_EN_MASK 0x00000400L
+#define UVD_CGC_MEM_SD_CTRL__MIF_SD_EN_MASK 0x00001000L
+#define UVD_CGC_MEM_SD_CTRL__LCM_SD_EN_MASK 0x00002000L
+#define UVD_CGC_MEM_SD_CTRL__MMSCH_SD_EN_MASK 0x00004000L
+#define UVD_CGC_MEM_SD_CTRL__MPC1_SD_EN_MASK 0x00008000L
+//UVD_SW_SCRATCH_00
+#define UVD_SW_SCRATCH_00__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_00__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_01
+#define UVD_SW_SCRATCH_01__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_01__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_02
+#define UVD_SW_SCRATCH_02__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_02__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_03
+#define UVD_SW_SCRATCH_03__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_03__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_04
+#define UVD_SW_SCRATCH_04__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_04__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_05
+#define UVD_SW_SCRATCH_05__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_05__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_06
+#define UVD_SW_SCRATCH_06__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_06__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_07
+#define UVD_SW_SCRATCH_07__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_07__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_08
+#define UVD_SW_SCRATCH_08__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_08__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_09
+#define UVD_SW_SCRATCH_09__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_09__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_10
+#define UVD_SW_SCRATCH_10__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_10__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_11
+#define UVD_SW_SCRATCH_11__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_11__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_12
+#define UVD_SW_SCRATCH_12__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_12__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_13
+#define UVD_SW_SCRATCH_13__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_13__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_14
+#define UVD_SW_SCRATCH_14__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_14__DATA_MASK 0xFFFFFFFFL
+//UVD_SW_SCRATCH_15
+#define UVD_SW_SCRATCH_15__DATA__SHIFT 0x0
+#define UVD_SW_SCRATCH_15__DATA_MASK 0xFFFFFFFFL
+//UVD_IH_SEM_CTRL
+#define UVD_IH_SEM_CTRL__IH_STALL_EN__SHIFT 0x0
+#define UVD_IH_SEM_CTRL__SEM_STALL_EN__SHIFT 0x1
+#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN__SHIFT 0x2
+#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN__SHIFT 0x3
+#define UVD_IH_SEM_CTRL__IH_VMID__SHIFT 0x4
+#define UVD_IH_SEM_CTRL__IH_USER_DATA__SHIFT 0x8
+#define UVD_IH_SEM_CTRL__IH_RINGID__SHIFT 0x14
+#define UVD_IH_SEM_CTRL__IH_STALL_EN_MASK 0x00000001L
+#define UVD_IH_SEM_CTRL__SEM_STALL_EN_MASK 0x00000002L
+#define UVD_IH_SEM_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L
+#define UVD_IH_SEM_CTRL__SEM_STATUS_CLEAN_MASK 0x00000008L
+#define UVD_IH_SEM_CTRL__IH_VMID_MASK 0x000000F0L
+#define UVD_IH_SEM_CTRL__IH_USER_DATA_MASK 0x000FFF00L
+#define UVD_IH_SEM_CTRL__IH_RINGID_MASK 0x0FF00000L
+
+
+// addressBlock: lmi_adp_indirect
+//UVD_LMI_CRC0
+#define UVD_LMI_CRC0__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC1
+#define UVD_LMI_CRC1__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC2
+#define UVD_LMI_CRC2__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC3
+#define UVD_LMI_CRC3__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC10
+#define UVD_LMI_CRC10__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC10__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC11
+#define UVD_LMI_CRC11__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC11__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC12
+#define UVD_LMI_CRC12__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC12__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC13
+#define UVD_LMI_CRC13__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC13__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC14
+#define UVD_LMI_CRC14__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC14__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC15
+#define UVD_LMI_CRC15__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC15__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_SWAP_CNTL2
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP__SHIFT 0x0
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP__SHIFT 0x2
+#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP__SHIFT 0x4
+#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP__SHIFT 0xc
+#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP__SHIFT 0xe
+#define UVD_LMI_SWAP_CNTL2__SCPU_R_MC_SWAP_MASK 0x00000003L
+#define UVD_LMI_SWAP_CNTL2__SCPU_W_MC_SWAP_MASK 0x0000000CL
+#define UVD_LMI_SWAP_CNTL2__ATOMIC_MC_SWAP_MASK 0x00000FF0L
+#define UVD_LMI_SWAP_CNTL2__CENC_MC_SWAP_MASK 0x00003000L
+#define UVD_LMI_SWAP_CNTL2__FBC_KEY_MC_SWAP_MASK 0x0000C000L
+//UVD_MEMCHECK_SYS_INT_EN
+#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x1b
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1c
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1d
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_EN__RE_ERR_EN_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_EN__IT_ERR_EN_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_EN__MP_ERR_EN_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_EN__DB_ERR_EN_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_EN__DBW_ERR_EN_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_EN__CM_ERR_EN_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_EN__VCPU_ERR_EN_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_EN__SRE_ERR_EN_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP2_ERR_EN_MASK 0x08000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_BSP3_ERR_EN_MASK 0x10000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR_ERR_EN_MASK 0x20000000L
+#define UVD_MEMCHECK_SYS_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_EN__PREF_ERR_EN_MASK 0x80000000L
+//UVD_MEMCHECK_SYS_INT_STAT
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR__SHIFT 0xe
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_STAT__RE_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_STAT__MP_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_STAT__DB_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_STAT__DBW_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_STAT__CM_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L
+#define UVD_MEMCHECK_SYS_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK_SYS_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__SRE_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK_SYS_INT_ACK
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK__SHIFT 0xe
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK__SHIFT 0xf
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK_SYS_INT_ACK__RE_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK_SYS_INT_ACK__MP_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK_SYS_INT_ACK__DB_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK_SYS_INT_ACK__DBW_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK_SYS_INT_ACK__CM_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L
+#define UVD_MEMCHECK_SYS_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK_SYS_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__SRE_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK_SYS_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK_VCPU_INT_EN
+#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN__SHIFT 0x1a
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN__SHIFT 0x1b
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN__SHIFT 0x1c
+#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN__SHIFT 0x1d
+#define UVD_MEMCHECK_VCPU_INT_EN__RE_ERR_EN_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_ERR_EN_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_EN__MP_ERR_EN_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_ERR_EN_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_EN__DBW_ERR_EN_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_ERR_EN_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_REF_ERR_EN_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_EN__VCPU_ERR_EN_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_DBW_ERR_EN_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_CM_COLOC_ERR_EN_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP0_ERR_EN_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP1_ERR_EN_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_EN__SRE_ERR_EN_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_EN__IT_RD_ERR_EN_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_EN__CM_RD_ERR_EN_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_EN__DB_RD_ERR_EN_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_RD_ERR_EN_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_EN__IDCT_RD_ERR_EN_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MPC_RD_ERR_EN_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_EN__LBSI_RD_ERR_EN_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_EN__RBC_RD_ERR_EN_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP2_ERR_EN_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_BSP3_ERR_EN_MASK 0x04000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR_ERR_EN_MASK 0x08000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__MIF_SCLR2_ERR_EN_MASK 0x10000000L
+#define UVD_MEMCHECK_VCPU_INT_EN__PREF_ERR_EN_MASK 0x20000000L
+//UVD_MEMCHECK_VCPU_INT_STAT
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR__SHIFT 0xd
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR__SHIFT 0xe
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_STAT__RE_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MP_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DB_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_STAT__DBW_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_STAT__CM_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_LO_ERR_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_REF_HI_ERR_MASK 0x00002000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_LO_ERR_MASK 0x00004000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__VCPU_HI_ERR_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_DBW_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_CM_COLOC_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP0_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__MIF_BSP1_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__SRE_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK_VCPU_INT_STAT__IT_RD_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK_VCPU_INT_ACK
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK__SHIFT 0xc
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK__SHIFT 0xd
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK__SHIFT 0xe
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK__SHIFT 0xf
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK_VCPU_INT_ACK__RE_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MP_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DB_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK_VCPU_INT_ACK__DBW_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK_VCPU_INT_ACK__CM_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_LO_ACK_MASK 0x00001000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_REF_HI_ACK_MASK 0x00002000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_LO_ACK_MASK 0x00004000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__VCPU_HI_ACK_MASK 0x00008000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_DBW_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_CM_COLOC_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP0_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__MIF_BSP1_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__SRE_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK_VCPU_INT_ACK__IT_RD_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK2_SYS_INT_STAT
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x1a
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x1b
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x1c
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x1d
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR__SHIFT 0x1e
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR__SHIFT 0x1f
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK2_SYS_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK2_SYS_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK2_SYS_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK2_SYS_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x04000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x08000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x10000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x20000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_LO_ERR_MASK 0x40000000L
+#define UVD_MEMCHECK2_SYS_INT_STAT__PREF_HI_ERR_MASK 0x80000000L
+//UVD_MEMCHECK2_SYS_INT_ACK
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x1a
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x1b
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x1c
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x1d
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK__SHIFT 0x1e
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK__SHIFT 0x1f
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK2_SYS_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK2_SYS_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK2_SYS_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK2_SYS_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x04000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x08000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x10000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x20000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_LO_ACK_MASK 0x40000000L
+#define UVD_MEMCHECK2_SYS_INT_ACK__PREF_HI_ACK_MASK 0x80000000L
+//UVD_MEMCHECK2_VCPU_INT_STAT
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR__SHIFT 0x0
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR__SHIFT 0x1
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR__SHIFT 0x2
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR__SHIFT 0x3
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR__SHIFT 0x4
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR__SHIFT 0x5
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR__SHIFT 0x6
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR__SHIFT 0x7
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR__SHIFT 0x8
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR__SHIFT 0x9
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR__SHIFT 0xa
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR__SHIFT 0xb
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR__SHIFT 0x10
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR__SHIFT 0x11
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR__SHIFT 0x12
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR__SHIFT 0x13
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR__SHIFT 0x14
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR__SHIFT 0x15
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR__SHIFT 0x16
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR__SHIFT 0x17
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR__SHIFT 0x18
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR__SHIFT 0x19
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR__SHIFT 0x1a
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR__SHIFT 0x1b
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_LO_ERR_MASK 0x00000001L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__CM_RD_HI_ERR_MASK 0x00000002L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_LO_ERR_MASK 0x00000004L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__DB_RD_HI_ERR_MASK 0x00000008L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_LO_ERR_MASK 0x00000010L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_RD_HI_ERR_MASK 0x00000020L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_LO_ERR_MASK 0x00000040L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__IDCT_RD_HI_ERR_MASK 0x00000080L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_LO_ERR_MASK 0x00000100L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MPC_RD_HI_ERR_MASK 0x00000200L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_LO_ERR_MASK 0x00000400L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__LBSI_RD_HI_ERR_MASK 0x00000800L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_LO_ERR_MASK 0x00010000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__RBC_RD_HI_ERR_MASK 0x00020000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_LO_ERR_MASK 0x00040000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP2_HI_ERR_MASK 0x00080000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_LO_ERR_MASK 0x00100000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_BSP3_HI_ERR_MASK 0x00200000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_LO_ERR_MASK 0x00400000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR_HI_ERR_MASK 0x00800000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_LO_ERR_MASK 0x01000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__MIF_SCLR2_HI_ERR_MASK 0x02000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_LO_ERR_MASK 0x04000000L
+#define UVD_MEMCHECK2_VCPU_INT_STAT__PREF_HI_ERR_MASK 0x08000000L
+//UVD_MEMCHECK2_VCPU_INT_ACK
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK__SHIFT 0x0
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK__SHIFT 0x1
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK__SHIFT 0x2
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK__SHIFT 0x3
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK__SHIFT 0x4
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK__SHIFT 0x5
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK__SHIFT 0x6
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK__SHIFT 0x7
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK__SHIFT 0x8
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK__SHIFT 0x9
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK__SHIFT 0xa
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK__SHIFT 0xb
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK__SHIFT 0x10
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK__SHIFT 0x11
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK__SHIFT 0x12
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK__SHIFT 0x13
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK__SHIFT 0x14
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK__SHIFT 0x15
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK__SHIFT 0x16
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK__SHIFT 0x17
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK__SHIFT 0x18
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK__SHIFT 0x19
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK__SHIFT 0x1a
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK__SHIFT 0x1b
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_LO_ACK_MASK 0x00000001L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__CM_RD_HI_ACK_MASK 0x00000002L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_LO_ACK_MASK 0x00000004L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__DB_RD_HI_ACK_MASK 0x00000008L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_LO_ACK_MASK 0x00000010L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_RD_HI_ACK_MASK 0x00000020L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_LO_ACK_MASK 0x00000040L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__IDCT_RD_HI_ACK_MASK 0x00000080L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_LO_ACK_MASK 0x00000100L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MPC_RD_HI_ACK_MASK 0x00000200L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_LO_ACK_MASK 0x00000400L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__LBSI_RD_HI_ACK_MASK 0x00000800L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_LO_ACK_MASK 0x00010000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__RBC_RD_HI_ACK_MASK 0x00020000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_LO_ACK_MASK 0x00040000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP2_HI_ACK_MASK 0x00080000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_LO_ACK_MASK 0x00100000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_BSP3_HI_ACK_MASK 0x00200000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_LO_ACK_MASK 0x00400000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR_HI_ACK_MASK 0x00800000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_LO_ACK_MASK 0x01000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__MIF_SCLR2_HI_ACK_MASK 0x02000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_LO_ACK_MASK 0x04000000L
+#define UVD_MEMCHECK2_VCPU_INT_ACK__PREF_HI_ACK_MASK 0x08000000L
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index bbe1337a8cee..e68c1e280322 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -182,6 +182,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2 = 0x60,
ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61,
ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70,
+ ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80,
};
enum atom_dp_vs_preemph_def{
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index f150404ffc68..f43e29722ef7 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -79,7 +79,14 @@ typedef struct ip_discovery_header
uint32_t id; /* Table ID */
uint16_t num_dies; /* Number of Dies */
die_info die_info[16]; /* list die information for up to 16 dies */
- uint16_t padding[1]; /* padding */
+ union {
+ uint16_t padding[1]; /* version <= 3 */
+ struct { /* version == 4 */
+ uint8_t base_addr_64_bit : 1; /* ip structures are using 64 bit base address */
+ uint8_t reserved : 7;
+ uint8_t reserved2;
+ };
+ };
} ip_discovery_header;
typedef struct ip
@@ -115,9 +122,29 @@ typedef struct ip_v3
uint8_t sub_revision : 4; /* HCID Sub-Revision */
uint8_t variant : 4; /* HW variant */
#endif
- uint32_t base_address[1]; /* Base Address list. Corresponds to the num_base_address field*/
+ uint32_t base_address[]; /* Base Address list. Corresponds to the num_base_address field*/
} ip_v3;
+typedef struct ip_v4 {
+ uint16_t hw_id; /* Hardware ID */
+ uint8_t instance_number; /* Instance number for the IP */
+ uint8_t num_base_address; /* Number of base addresses*/
+ uint8_t major; /* Hardware ID.major version */
+ uint8_t minor; /* Hardware ID.minor version */
+ uint8_t revision; /* Hardware ID.revision version */
+#if defined(LITTLEENDIAN_CPU)
+ uint8_t sub_revision : 4; /* HCID Sub-Revision */
+ uint8_t variant : 4; /* HW variant */
+#elif defined(BIGENDIAN_CPU)
+ uint8_t variant : 4; /* HW variant */
+ uint8_t sub_revision : 4; /* HCID Sub-Revision */
+#endif
+ union {
+ DECLARE_FLEX_ARRAY(uint32_t, base_address); /* 32-bit Base Address list. Corresponds to the num_base_address field*/
+ DECLARE_FLEX_ARRAY(uint64_t, base_address_64); /* 64-bit Base Address list. Corresponds to the num_base_address field*/
+ } __packed;
+} ip_v4;
+
typedef struct die_header
{
uint16_t die_id;
@@ -134,6 +161,7 @@ typedef struct ip_structure
{
ip *ip_list;
ip_v3 *ip_v3_list;
+ ip_v4 *ip_v4_list;
}; /* IP list. Variable size*/
} die;
} ip_structure;
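The ip_v4 record keeps its base-address list in a union of 32-bit and 64-bit flexible arrays; which view is valid comes from the base_addr_64_bit flag that a version 4 ip_discovery_header stores in its former padding word. A minimal sketch of a reader outside this patch; the helper name is hypothetical:

/* Hypothetical helper: pick the correct view of the base-address union.
 * 'ihdr' points at a version-4 ip_discovery_header and 'ip' at one ip_v4
 * record inside the same discovery blob.
 */
static uint64_t ip_v4_get_base(const ip_discovery_header *ihdr,
			       const ip_v4 *ip, unsigned int idx)
{
	if (ihdr->base_addr_64_bit)
		return ip->base_address_64[idx];	/* 64-bit list */
	return ip->base_address[idx];			/* legacy 32-bit list */
}

Note that the per-entry stride (4 vs 8 bytes per base address) changes with the same flag, so code stepping from one ip_v4 record to the next has to size the record accordingly.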
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5cb3e8634739..8433f99f6667 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -230,28 +230,30 @@ struct kfd2kgd_calls {
/* Register access functions */
void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+ uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases,
+ uint32_t inst);
int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
- unsigned int vmid);
+ unsigned int vmid, uint32_t inst);
- int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id);
+ int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst);
int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
+ struct mm_struct *mm, uint32_t inst);
int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t doorbell_off);
+ uint32_t doorbell_off, uint32_t inst);
int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm);
int (*hqd_dump)(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);
int (*hqd_sdma_dump)(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
@@ -259,12 +261,12 @@ struct kfd2kgd_calls {
bool (*hqd_is_occupied)(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id);
+ uint32_t queue_id, uint32_t inst);
int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id);
+ uint32_t queue_id, uint32_t inst);
bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);
@@ -273,7 +275,7 @@ struct kfd2kgd_calls {
int (*wave_control_execute)(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd);
+ uint32_t sq_cmd, uint32_t inst);
bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
uint8_t vmid,
uint16_t *p_pasid);
@@ -289,10 +291,48 @@ struct kfd2kgd_calls {
uint32_t vmid, uint64_t page_table_base);
uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);
+ uint32_t (*enable_debug_trap)(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+ uint32_t (*disable_debug_trap)(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid);
+ int (*validate_trap_override_request)(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported);
+ uint32_t (*set_wave_launch_trap_override)(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev);
+ uint32_t (*set_wave_launch_mode)(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
+ uint32_t (*set_address_watch)(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst);
+ uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
+ uint32_t watch_id);
+ void (*get_iq_wait_times)(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst);
+ void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t grace_period,
+ uint32_t *reg_offset,
+ uint32_t *reg_data,
+ uint32_t inst);
void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
- int *wave_cnt, int *max_waves_per_cu);
+ int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
void (*program_trap_handler_settings)(struct amdgpu_device *adev,
- uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
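Every register-access callback above now takes a trailing inst argument so the KFD side can address one specific GFX instance on multi-XCC parts. A hedged sketch of what a caller could look like after this change; f2g and num_xcc are illustrative names, not part of the interface:

/* Illustrative only: program interrupts on every instance instead of
 * implicitly on a single one (error handling omitted).
 */
static void init_interrupts_all_insts(struct amdgpu_device *adev,
				      const struct kfd2kgd_calls *f2g,
				      uint32_t pipe_id, uint32_t num_xcc)
{
	uint32_t inst;

	for (inst = 0; inst < num_xcc; inst++)
		f2g->init_interrupts(adev, pipe_id, inst);
}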
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f542f6e19ed..84c5224d994c 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -132,7 +132,8 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_MEM_TEMP,
AMDGPU_PP_SENSOR_VCE_POWER,
AMDGPU_PP_SENSOR_UVD_POWER,
- AMDGPU_PP_SENSOR_GPU_POWER,
+ AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
AMDGPU_PP_SENSOR_SS_APU_SHARE,
AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
@@ -892,4 +893,73 @@ struct gpu_metrics_v2_3 {
uint16_t average_temperature_core[8]; // average CPU core temperature on APUs
uint16_t average_temperature_l3[2];
};
+
+struct gpu_metrics_v2_4 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (unit: centi-Celsius) */
+ uint16_t temperature_gfx;
+ uint16_t temperature_soc;
+ uint16_t temperature_core[8];
+ uint16_t temperature_l3[2];
+
+ /* Utilization (unit: centi) */
+ uint16_t average_gfx_activity;
+ uint16_t average_mm_activity;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Power/Energy (unit: mW) */
+ uint16_t average_socket_power;
+ uint16_t average_cpu_power;
+ uint16_t average_soc_power;
+ uint16_t average_gfx_power;
+ uint16_t average_core_power[8];
+
+ /* Average clocks (unit: MHz) */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_fclk_frequency;
+ uint16_t average_vclk_frequency;
+ uint16_t average_dclk_frequency;
+
+ /* Current clocks (unit: MHz) */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_fclk;
+ uint16_t current_vclk;
+ uint16_t current_dclk;
+ uint16_t current_coreclk[8];
+ uint16_t current_l3clk[2];
+
+ /* Throttle status (ASIC dependent) */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t fan_pwm;
+
+ uint16_t padding[3];
+
+ /* Throttle status (ASIC independent) */
+ uint64_t indep_throttle_status;
+
+ /* Average Temperature (unit: centi-Celsius) */
+ uint16_t average_temperature_gfx;
+ uint16_t average_temperature_soc;
+ uint16_t average_temperature_core[8];
+ uint16_t average_temperature_l3[2];
+
+ /* Power/Voltage (unit: mV) */
+ uint16_t average_cpu_voltage;
+ uint16_t average_soc_voltage;
+ uint16_t average_gfx_voltage;
+
+ /* Power/Current (unit: mA) */
+ uint16_t average_cpu_current;
+ uint16_t average_soc_current;
+ uint16_t average_gfx_current;
+};
#endif
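gpu_metrics_v2_4 extends v2_3 with average voltage and current readings; consumers tell the layouts apart through the format/content revision pair in the common header. A sketch, under the assumption that the SMU backend fills the table directly rather than through a shared helper:

/* Sketch: stamp the header so tools can distinguish v2.4 from v2.3.
 * The voltage/current arguments are placeholders for whatever the SMU
 * firmware actually reports.
 */
static void fill_gpu_metrics_v2_4(struct gpu_metrics_v2_4 *gpu_metrics,
				  uint16_t gfx_mv, uint16_t gfx_ma)
{
	gpu_metrics->common_header.structure_size = sizeof(*gpu_metrics);
	gpu_metrics->common_header.format_revision = 2;
	gpu_metrics->common_header.content_revision = 4;

	gpu_metrics->average_gfx_voltage = gfx_mv;	/* unit: mV */
	gpu_metrics->average_gfx_current = gfx_ma;	/* unit: mA */
}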
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index dc694cb246d9..b1db2b190187 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -274,7 +274,10 @@ union MESAPI__ADD_QUEUE {
uint32_t is_kfd_process : 1;
uint32_t trap_en : 1;
uint32_t is_aql_queue : 1;
- uint32_t reserved : 20;
+ uint32_t skip_process_ctx_clear : 1;
+ uint32_t map_legacy_kq : 1;
+ uint32_t exclusively_scheduled : 1;
+ uint32_t reserved : 17;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
@@ -523,6 +526,7 @@ enum MESAPI_MISC_OPCODE {
MESAPI_MISC__QUERY_STATUS,
MESAPI_MISC__READ_REG,
MESAPI_MISC__WAIT_REG_MEM,
+ MESAPI_MISC__SET_SHADER_DEBUGGER,
MESAPI_MISC__MAX,
};
@@ -561,6 +565,21 @@ struct QUERY_STATUS {
uint32_t context_id;
};
+struct SET_SHADER_DEBUGGER {
+ uint64_t process_context_addr;
+ union {
+ struct {
+ uint32_t single_memop : 1; /* SQ_DEBUG.single_memop */
+ uint32_t single_alu_op : 1; /* SQ_DEBUG.single_alu_op */
+ uint32_t reserved : 30;
+ };
+ uint32_t u32all;
+ } flags;
+ uint32_t spi_gdbg_per_vmid_cntl;
+ uint32_t tcp_watch_cntl[4]; /* TCP_WATCHx_CNTL */
+ uint32_t trap_en;
+};
+
union MESAPI__MISC {
struct {
union MES_API_HEADER header;
@@ -573,6 +592,9 @@ union MESAPI__MISC {
struct QUERY_STATUS query_status;
struct READ_REG read_reg;
struct WAIT_REG_MEM wait_reg_mem;
+ struct SET_SHADER_DEBUGGER set_shader_debugger;
+ enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+
uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
};
};
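SET_SHADER_DEBUGGER travels in the generic MESAPI__MISC packet like the other misc opcodes. A rough sketch of filling one in before it is handed to the MES submission path; the opcode field sits in the part of the union elided from this hunk, and the argument values are placeholders:

/* Illustrative packet fill (kernel side, assumes linux/string.h). */
static void build_set_shader_debugger(union MESAPI__MISC *misc_pkt,
				      uint64_t process_ctx_gpu_addr,
				      uint32_t per_vmid_cntl)
{
	memset(misc_pkt, 0, sizeof(*misc_pkt));
	misc_pkt->opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
	misc_pkt->set_shader_debugger.process_context_addr = process_ctx_gpu_addr;
	misc_pkt->set_shader_debugger.flags.single_memop = 1;	/* SQ_DEBUG.single_memop */
	misc_pkt->set_shader_debugger.spi_gdbg_per_vmid_cntl = per_vmid_cntl;
	misc_pkt->set_shader_debugger.trap_en = 1;
}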
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index a0c672889fe4..a2f81b9c38af 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -196,10 +196,20 @@ struct v9_mqd {
uint32_t compute_wave_restore_addr_lo;
uint32_t compute_wave_restore_addr_hi;
uint32_t compute_wave_restore_control;
- uint32_t compute_static_thread_mgmt_se4;
- uint32_t compute_static_thread_mgmt_se5;
- uint32_t compute_static_thread_mgmt_se6;
- uint32_t compute_static_thread_mgmt_se7;
+ union {
+ struct {
+ uint32_t compute_static_thread_mgmt_se4;
+ uint32_t compute_static_thread_mgmt_se5;
+ uint32_t compute_static_thread_mgmt_se6;
+ uint32_t compute_static_thread_mgmt_se7;
+ };
+ struct {
+ uint32_t compute_current_logic_xcc_id; // offset: 39 (0x27)
+ uint32_t compute_restart_cg_tg_id; // offset: 40 (0x28)
+ uint32_t compute_tg_chunk_size; // offset: 41 (0x29)
+ uint32_t compute_restore_tg_chunk_size; // offset: 42 (0x2A)
+ };
+ };
uint32_t reserved_43;
uint32_t reserved_44;
uint32_t reserved_45;
@@ -382,8 +392,16 @@ struct v9_mqd {
uint32_t iqtimer_pkt_dw29;
uint32_t iqtimer_pkt_dw30;
uint32_t iqtimer_pkt_dw31;
- uint32_t reserved_225;
- uint32_t reserved_226;
+ union {
+ struct {
+ uint32_t reserved_225;
+ uint32_t reserved_226;
+ };
+ struct {
+ uint32_t pm4_target_xcc_in_xcp; // offset: 225 (0xE1)
+ uint32_t cp_mqd_stride_size; // offset: 226 (0xE2)
+ };
+ };
uint32_t reserved_227;
uint32_t set_resources_header;
uint32_t set_resources_dw1;
diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h
index 0fea6a746611..a2c8dca2425e 100644
--- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h
+++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h
@@ -7,13 +7,11 @@
#define MAX_SEGMENT 6
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
} __maybe_unused;
-struct IP_BASE
-{
+struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
} __maybe_unused;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 58c2246918fd..5b1d73b00ef7 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -35,44 +35,6 @@
#include <linux/pm_runtime.h>
#include <asm/processor.h>
-static const struct cg_flag_name clocks[] = {
- {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
- {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
- {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
- {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
- {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
- {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
- {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
- {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
- {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
- {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
- {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
- {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
- {0, NULL},
-};
-
static const struct hwmon_temp_label {
enum PP_HWMON_TEMP channel;
const char *label;
@@ -678,7 +640,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* clock labeled OD_MCLK
*
* - three <frequency, voltage> points labeled OD_VDDC_CURVE.
- * They can be used to calibrate the sclk voltage curve.
+ * They can be used to calibrate the sclk voltage curve. This is
+ * available for Vega20 and NV1X.
+ *
+ * - voltage offset for the six anchor points of the v/f curve labeled
+ * OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This
+ * is only available for some SMU13 ASICs.
*
* - voltage offset(in mV) applied on target voltage calculation.
* This is available for Sienna Cichlid, Navy Flounder and Dimgrey
@@ -719,12 +686,19 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* E.g., "p 2 0 800" would set the minimum core clock on core
* 2 to 800Mhz.
*
- * For sclk voltage curve, enter the new values by writing a
- * string that contains "vc point clock voltage" to the file. The
- * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
- * update point1 with clock set as 300Mhz and voltage as
- * 600mV. "vc 2 1000 1000" will update point3 with clock set
- * as 1000Mhz and voltage 1000mV.
+ * For sclk voltage curve,
+ * - For NV1X, enter the new values by writing a string that
+ * contains "vc point clock voltage" to the file. The points
+ * are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update
+ * point1 with clock set as 300Mhz and voltage as 600mV. "vc 2
+ * 1000 1000" will update point3 with clock set as 1000Mhz and
+ * voltage 1000mV.
+ * - For SMU13 ASICs, enter the new values by writing a string that
+ * contains "vc anchor_point_index voltage_offset" to the file.
+ * There are total six anchor points defined on the v/f curve with
+ * index as 0 - 5.
+ * - "vc 0 10" will update the voltage offset for point1 as 10mv.
+ * - "vc 5 -10" will update the voltage offset for point6 as -10mv.
*
* To update the voltage offset applied for gfxclk/voltage calculation,
* enter the new value by writing a string that contains "vo offset".
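For the new SMU13 per-zone offsets the usual two-step pattern applies: write the edit command, then commit with "c". A small userspace sketch; the sysfs path is a placeholder and varies per system:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *od = "/sys/class/drm/card0/device/pp_od_clk_voltage";
	int fd = open(od, O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "vc 0 10\n", 8) < 0 ||	/* +10 mV on anchor point 1 */
	    write(fd, "c\n", 2) < 0) {		/* commit the edited table */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}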
@@ -769,7 +743,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
type = PP_OD_EDIT_CCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
- else if(*buf == 'r')
+ else if (*buf == 'r')
type = PP_OD_RESTORE_DEFAULT_TABLE;
else if (*buf == 'c')
type = PP_OD_COMMIT_DPM_TABLE;
@@ -871,13 +845,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
}
if (ret == -ENOENT) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- if (size > 0) {
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
- }
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
}
if (size == 0)
@@ -1495,6 +1467,32 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return -EINVAL;
}
+static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
+ enum amd_pp_sensors sensor,
+ void *query)
+{
+ int r, size = sizeof(uint32_t);
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /* get the sensor value */
+ r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
/**
* DOC: gpu_busy_percent
*
@@ -1509,26 +1507,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- int r, value, size = sizeof(value);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
- /* read the IP busy sensor */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
- (void *)&value, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ unsigned int value;
+ int r;
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
if (r)
return r;
@@ -1549,26 +1531,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- int r, value, size = sizeof(value);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
- /* read the IP busy sensor */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
- (void *)&value, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
+ unsigned int value;
+ int r;
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
if (r)
return r;
@@ -1842,45 +1808,15 @@ out:
return size;
}
-static int amdgpu_device_read_powershift(struct amdgpu_device *adev,
- uint32_t *ss_power, bool dgpu_share)
-{
- struct drm_device *ddev = adev_to_drm(adev);
- uint32_t size;
- int r = 0;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
- }
-
- if (dgpu_share)
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
- (void *)ss_power, &size);
- else
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
- (void *)ss_power, &size);
-
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return r;
-}
-
static int amdgpu_show_powershift_percent(struct device *dev,
- char *buf, bool dgpu_share)
+ char *buf, enum amd_pp_sensors sensor)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t ss_power;
int r = 0, i;
- r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
+ r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
if (r == -EOPNOTSUPP) {
/* sensor not available on dGPU, try to read from APU */
adev = NULL;
@@ -1893,14 +1829,15 @@ static int amdgpu_show_powershift_percent(struct device *dev,
}
mutex_unlock(&mgpu_info.mutex);
if (adev)
- r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
+ r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
}
- if (!r)
- r = sysfs_emit(buf, "%u%%\n", ss_power);
+ if (r)
+ return r;
- return r;
+ return sysfs_emit(buf, "%u%%\n", ss_power);
}
+
/**
* DOC: smartshift_apu_power
*
@@ -1914,7 +1851,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return amdgpu_show_powershift_percent(dev, buf, false);
+ return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
}
/**
@@ -1930,7 +1867,7 @@ static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device
static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return amdgpu_show_powershift_percent(dev, buf, true);
+ return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
}
/**
@@ -1993,7 +1930,6 @@ out:
return r;
}
-
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
@@ -2006,15 +1942,15 @@ static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device
static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states)
{
- uint32_t ss_power, size;
+ uint32_t ss_power;
if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
- (void *)&ss_power, &size))
+ else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
- (void *)&ss_power, &size))
+ else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2077,8 +2013,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
if (gc_ver < IP_VERSION(9, 0, 0) ||
- gc_ver == IP_VERSION(9, 4, 1) ||
- gc_ver == IP_VERSION(9, 4, 2))
+ !amdgpu_device_has_display_hardware(adev))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
if (mp1_ver < IP_VERSION(10, 0, 0))
@@ -2100,6 +2035,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -2110,7 +2046,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
}
} else if (DEVICE_ATTR_IS(pp_features)) {
- if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0))
+ if ((adev->flags & AMD_IS_APU &&
+ gc_ver != IP_VERSION(9, 4, 3)) ||
+ gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
if (gc_ver < IP_VERSION(9, 1, 0))
@@ -2121,7 +2059,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)))
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2135,7 +2074,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2) ||
- gc_ver == IP_VERSION(11, 0, 3)))
+ gc_ver == IP_VERSION(11, 0, 3) ||
+ gc_ver == IP_VERSION(9, 4, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
if (!((gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2197,15 +2137,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
uint32_t mask, struct list_head *attr_list)
{
int ret = 0;
- struct device_attribute *dev_attr = &attr->dev_attr;
- const char *name = dev_attr->attr.name;
enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
struct amdgpu_device_attr_entry *attr_entry;
+ struct device_attribute *dev_attr;
+ const char *name;
int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
- BUG_ON(!attr);
+ if (!attr)
+ return -EINVAL;
+
+ dev_attr = &attr->dev_attr;
+ name = dev_attr->attr.name;
attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
@@ -2291,46 +2235,32 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int channel = to_sensor_dev_attr(attr)->index;
- int r, temp = 0, size = sizeof(temp);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ int r, temp = 0;
if (channel >= PP_TEMP_MAX)
return -EINVAL;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp, &size);
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp, &size);
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_MEM:
/* get current memory temperature */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp, &size);
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
+ (void *)&temp);
break;
default:
r = -EINVAL;
break;
}
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
if (r)
return r;
@@ -2614,25 +2544,10 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 min_rpm = 0;
- u32 size = sizeof(min_rpm);
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ (void *)&min_rpm);
if (r)
return r;
@@ -2646,25 +2561,10 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 max_rpm = 0;
- u32 size = sizeof(max_rpm);
int r;
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ (void *)&max_rpm);
if (r)
return r;
@@ -2826,26 +2726,11 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 vddgfx;
- int r, size = sizeof(vddgfx);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ int r;
/* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
+ (void *)&vddgfx);
if (r)
return r;
@@ -2865,30 +2750,15 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 vddnb;
- int r, size = sizeof(vddnb);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
+ int r;
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
/* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
+ (void *)&vddnb);
if (r)
return r;
@@ -2902,40 +2772,48 @@ static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
return sysfs_emit(buf, "vddnb\n");
}
-static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_hwmon_get_power(struct device *dev,
+ enum amd_pp_sensors sensor)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ unsigned int uw;
u32 query = 0;
- int r, size = sizeof(u32);
- unsigned uw;
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
-
- /* get the voltage */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
- (void *)&query, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ int r;
+ r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
if (r)
return r;
/* convert to microwatts */
uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
- return sysfs_emit(buf, "%u\n", uw);
+ return uw;
+}
+
+static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t val;
+
+ val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
+ if (val < 0)
+ return val;
+
+ return sysfs_emit(buf, "%zd\n", val);
+}
+
+static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t val;
+
+ val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
+ if (val < 0)
+ return val;
+
+ return sysfs_emit(buf, "%zd\n", val);
}
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -3070,26 +2948,11 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t sclk;
- int r, size = sizeof(sclk);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ int r;
/* get the sclk */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
+ (void *)&sclk);
if (r)
return r;
@@ -3109,26 +2972,11 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t mclk;
- int r, size = sizeof(mclk);
-
- if (amdgpu_in_reset(adev))
- return -EPERM;
- if (adev->in_suspend && !adev->in_runpm)
- return -EPERM;
-
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ int r;
/* get the sclk */
- r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk, &size);
-
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
+ (void *)&mclk);
if (r)
return r;
@@ -3188,6 +3036,8 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
*
+ * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
+ *
* - power1_cap_min: minimum cap supported in microWatts
*
* - power1_cap_max: maximum cap supported in microWatts
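Both nodes report microWatts, so userspace can consume whichever one the platform exposes. A small sketch reading the new instantaneous value; the hwmon index is a placeholder that has to be discovered at runtime:

#include <stdio.h>

int main(void)
{
	unsigned long long uw = 0;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_input", "r");

	if (!f || fscanf(f, "%llu", &uw) != 1) {
		if (f)
			fclose(f);
		return 1;
	}
	fclose(f);
	printf("%llu.%06llu W\n", uw / 1000000, uw % 1000000);
	return 0;
}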
@@ -3256,6 +3106,7 @@ static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NU
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
@@ -3302,6 +3153,7 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
@@ -3327,6 +3179,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ uint32_t tmp;
/* under multi-vf mode, the hwmon attributes are all not supported */
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
@@ -3362,7 +3215,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* Skip crit temp on APU */
- if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
+ if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
+ (gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
return 0;
@@ -3395,9 +3249,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
- /* In the case of APUs, this is only implemented on Vangogh */
+ /* not implemented yet for APUs other than GC 10.3.1 (Vangogh) and 9.4.3 */
if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) &&
+ ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
+ (gc_ver != IP_VERSION(9, 4, 3)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
@@ -3410,6 +3265,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
return 0;
+ /* not all products support both average and instantaneous */
+ if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+ if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* hide max/min values if we can't both query and manage the fan */
if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
(amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
@@ -3426,36 +3289,48 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
- adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
+ adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
+ (gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
attr == &sensor_dev_attr_in0_label.dev_attr.attr))
return 0;
- /* only APUs have vddnb */
- if (!(adev->flags & AMD_IS_APU) &&
+ /* only APUs other than GC 9.4.3 have vddnb */
+ if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
- /* no mclk on APUs */
- if ((adev->flags & AMD_IS_APU) &&
+ /* no mclk on APUs other than GC 9.4.3 */
+ if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
return 0;
- /* only SOC15 dGPUs support hotspot and mem temperatures */
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
+ (gc_ver != IP_VERSION(9, 4, 3)) &&
+ (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
+ return 0;
+
+ /* hotspot temperature for GC 9.4.3 */
+ if ((gc_ver == IP_VERSION(9, 4, 3)) &&
+ (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_label.dev_attr.attr))
+ return 0;
+
+ /* only SOC15 dGPUs support hotspot and mem temperatures */
+ if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
+ (gc_ver == IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
+ attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
return 0;
/* only Vangogh has fast PPT limit and power labels */
@@ -3544,7 +3419,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
- struct amdgpu_device *adev) {
+ struct amdgpu_device *adev)
+{
uint16_t *p_val;
uint32_t size;
int i;
@@ -3593,7 +3469,7 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
@@ -3660,6 +3536,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
return 0;
}
+static const struct cg_flag_name clocks[] = {
+ {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
+ {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
+ {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
+ {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
+ {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
+ {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
+ {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
+ {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
+ {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
+ {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
+ {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
+ {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
+ {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
+ {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
+ {0, NULL},
+};
+
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
int i;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index d178f3f44081..42172b00be66 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -89,6 +89,8 @@ struct amdgpu_dpm_thermal {
int max_mem_crit_temp;
/* memory max emergency(shutdown) temp */
int max_mem_emergency_temp;
+ /* SWCTF threshold */
+ int sw_ctf_threshold;
/* was last interrupt low to high or high to low */
bool high_to_low;
/* interrupt source */
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
index 52045ad59bed..eec816f0cbf9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
@@ -24,8 +24,7 @@
#ifndef __AMDGPU_PM_H__
#define __AMDGPU_PM_H__
-struct cg_flag_name
-{
+struct cg_flag_name {
u64 flag;
const char *name;
};
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
index 566a0da59e53..251ed011b3b0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
@@ -38,13 +38,11 @@
#define SMU_13_0_0_PP_THERMALCONTROLLER_NONE 0
#define SMU_13_0_0_PP_THERMALCONTROLLER_NAVI21 28
-#define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x81 // OverDrive 8 Table Version 0.2
+#define SMU_13_0_0_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2
#define SMU_13_0_0_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
-enum SMU_13_0_0_ODFEATURE_CAP
-{
+enum SMU_13_0_0_ODFEATURE_CAP {
SMU_13_0_0_ODCAP_GFXCLK_LIMITS = 0,
- SMU_13_0_0_ODCAP_GFXCLK_CURVE,
SMU_13_0_0_ODCAP_UCLK_LIMITS,
SMU_13_0_0_ODCAP_POWER_LIMIT,
SMU_13_0_0_ODCAP_FAN_ACOUSTIC_LIMIT,
@@ -59,13 +57,12 @@ enum SMU_13_0_0_ODFEATURE_CAP
SMU_13_0_0_ODCAP_FAN_CURVE,
SMU_13_0_0_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT,
SMU_13_0_0_ODCAP_POWER_MODE,
+ SMU_13_0_0_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET,
SMU_13_0_0_ODCAP_COUNT,
};
-enum SMU_13_0_0_ODFEATURE_ID
-{
+enum SMU_13_0_0_ODFEATURE_ID {
SMU_13_0_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
- SMU_13_0_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_13_0_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
SMU_13_0_0_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_0_ODCAP_UCLK_LIMITS, //UCLK Limit feature
SMU_13_0_0_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_0_ODCAP_POWER_LIMIT, //Power Limit feature
SMU_13_0_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
@@ -80,19 +77,15 @@ enum SMU_13_0_0_ODFEATURE_ID
SMU_13_0_0_ODFEATURE_FAN_CURVE = 1 << SMU_13_0_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_13_0_0_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_0_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, //Auto Fan Acoustic RPM feature
SMU_13_0_0_ODFEATURE_POWER_MODE = 1 << SMU_13_0_0_ODCAP_POWER_MODE, //Optimized GPU Power Mode feature
+ SMU_13_0_0_ODFEATURE_PER_ZONE_GFX_VOLTAGE_OFFSET = 1 << SMU_13_0_0_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET, //Per-zone voltage offset feature
SMU_13_0_0_ODFEATURE_COUNT = 16,
};
#define SMU_13_0_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
-enum SMU_13_0_0_ODSETTING_ID
-{
+enum SMU_13_0_0_ODSETTING_ID {
SMU_13_0_0_ODSETTING_GFXCLKFMAX = 0,
SMU_13_0_0_ODSETTING_GFXCLKFMIN,
- SMU_13_0_0_ODSETTING_CUSTOM_GFX_VF_CURVE_A,
- SMU_13_0_0_ODSETTING_CUSTOM_GFX_VF_CURVE_B,
- SMU_13_0_0_ODSETTING_CUSTOM_GFX_VF_CURVE_C,
- SMU_13_0_0_ODSETTING_CUSTOM_CURVE_VFT_FMIN,
SMU_13_0_0_ODSETTING_UCLKFMIN,
SMU_13_0_0_ODSETTING_UCLKFMAX,
SMU_13_0_0_ODSETTING_POWERPERCENTAGE,
@@ -117,12 +110,17 @@ enum SMU_13_0_0_ODSETTING_ID
SMU_13_0_0_ODSETTING_FAN_CURVE_SPEED_5,
SMU_13_0_0_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT,
SMU_13_0_0_ODSETTING_POWER_MODE,
+ SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_1,
+ SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_2,
+ SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_3,
+ SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_4,
+ SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_5,
+ SMU_13_0_0_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_6,
SMU_13_0_0_ODSETTING_COUNT,
};
#define SMU_13_0_0_MAX_ODSETTING 64 //Maximum Number of ODSettings
-enum SMU_13_0_0_PWRMODE_SETTING
-{
+enum SMU_13_0_0_PWRMODE_SETTING {
SMU_13_0_0_PMSETTING_POWER_LIMIT_QUIET = 0,
SMU_13_0_0_PMSETTING_POWER_LIMIT_BALANCE,
SMU_13_0_0_PMSETTING_POWER_LIMIT_TURBO,
@@ -142,8 +140,7 @@ enum SMU_13_0_0_PWRMODE_SETTING
};
#define SMU_13_0_0_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings
-struct smu_13_0_0_overdrive_table
-{
+struct smu_13_0_0_overdrive_table {
uint8_t revision; //Revision = SMU_13_0_0_PP_OVERDRIVE_VERSION
uint8_t reserve[3]; //Zero filled field reserved for future use
uint32_t feature_count; //Total number of supported features
@@ -154,8 +151,7 @@ struct smu_13_0_0_overdrive_table
int16_t pm_setting[SMU_13_0_0_MAX_PMSETTING]; //Optimized power mode feature settings
};
-enum SMU_13_0_0_PPCLOCK_ID
-{
+enum SMU_13_0_0_PPCLOCK_ID {
SMU_13_0_0_PPCLOCK_GFXCLK = 0,
SMU_13_0_0_PPCLOCK_SOCCLK,
SMU_13_0_0_PPCLOCK_UCLK,
@@ -173,8 +169,7 @@ enum SMU_13_0_0_PPCLOCK_ID
};
#define SMU_13_0_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
-struct smu_13_0_0_powerplay_table
-{
+struct smu_13_0_0_powerplay_table {
struct atom_common_table_header header; //For SMU13, header.format_revision = 15, header.content_revision = 0
uint8_t table_revision; //For SMU13, table_revision = 2
uint8_t padding;
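The ODCAP/ODFEATURE enums in this header are kept in lockstep: every SMU_13_0_0_ODCAP_* value is a bit position and the matching SMU_13_0_0_ODFEATURE_* value is defined as 1 << cap, so removing SMU_13_0_0_ODCAP_GFXCLK_CURVE renumbers the remaining caps and, through those shift definitions, their feature masks as well. Checking whether a capability is exposed is then a plain mask test; in the sketch below, supported_features is a hypothetical bitmask, not a field defined in this header:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: test one overdrive capability bit against a
 * hypothetical bitmask assembled from the ODCAP bit positions. */
static bool od_cap_supported(uint32_t supported_features, unsigned int cap_bit)
{
	return (supported_features & (1u << cap_bit)) != 0;
}

/* e.g. od_cap_supported(supported_features, SMU_13_0_0_ODCAP_UCLK_LIMITS) */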
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index f5e08b60f66e..5d28c951a319 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -191,8 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
}
#if 0
-static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
-{
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 1, 4, 1 },
{ 2, 5, 1 },
@@ -204,32 +203,27 @@ static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{ 0xffffffff }
};
-static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
-{
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
-static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
-{
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
-static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
-{
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
-static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
-{
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 0xffffffff }
};
-static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
-{
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = {
{ 0, 4, 1 },
{ 1, 4, 1 },
{ 2, 5, 1 },
@@ -260,39 +254,32 @@ static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{ 0xffffffff }
};
-static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
-{
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] = {
{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
-static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
-{
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] = {
{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
-static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
-{
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] = {
{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
-static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
-{
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] = {
{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
-static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
-{
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] = {
{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
-static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
-{
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] = {
{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif
-static const struct kv_pt_config_reg didt_config_kv[] =
-{
+static const struct kv_pt_config_reg didt_config_kv[] = {
{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
@@ -508,19 +495,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
@@ -1173,9 +1160,9 @@ static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
pi->graphics_level[i].ClkBypassCntl = 2;
else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
pi->graphics_level[i].ClkBypassCntl = 7;
- else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
+ else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
pi->graphics_level[i].ClkBypassCntl = 6;
- else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
+ else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
pi->graphics_level[i].ClkBypassCntl = 8;
else
pi->graphics_level[i].ClkBypassCntl = 0;
@@ -1825,7 +1812,7 @@ static void kv_set_valid_clock_range(struct amdgpu_device *adev,
if ((new_ps->levels[0].sclk -
table->entries[pi->highest_valid].sclk_frequency) >
(table->entries[pi->lowest_valid].sclk_frequency -
- new_ps->levels[new_ps->num_levels -1].sclk))
+ new_ps->levels[new_ps->num_levels - 1].sclk))
pi->highest_valid = pi->lowest_valid;
else
pi->lowest_valid = pi->highest_valid;
@@ -3333,8 +3320,7 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.set_powergating_state = kv_dpm_set_powergating_state,
};
-const struct amdgpu_ip_block_version kv_smu_ip_block =
-{
+const struct amdgpu_ip_block_version kv_smu_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index d3fe149d8476..81fb4e5dd804 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -794,7 +794,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
struct i2c_board_info info = { };
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
+ strscpy(info.type, name, sizeof(info.type));
i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
index 055321f61ca7..3e7caa715533 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
@@ -117,8 +117,7 @@ enum r600_display_watermark {
R600_DISPLAY_WATERMARK_HIGH = 1,
};
-enum r600_display_gap
-{
+enum r600_display_gap {
R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
R600_PM_DISPLAY_GAP_VBLANK = 1,
R600_PM_DISPLAY_GAP_WATERMARK = 2,
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index d6d9e3b1b2c0..02e69ccff3ba 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
- int ret;
-
- ret = si_thermal_enable_alert(adev, false);
- if (ret)
- return ret;
- ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = si_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
-
- return ret;
-}
-
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->pm.dpm_enabled)
- return 0;
-
- ret = si_set_temperature_range(adev);
- if (ret)
- return ret;
-#if 0 //TODO ?
- si_dpm_powergate_uvd(adev, true);
-#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
index c7dc117a688c..90ec411c5029 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
@@ -29,8 +29,7 @@
#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-struct PP_SIslands_Dpm2PerfLevel
-{
+struct PP_SIslands_Dpm2PerfLevel {
uint8_t MaxPS;
uint8_t TgtAct;
uint8_t MaxPS_StepInc;
@@ -47,8 +46,7 @@ struct PP_SIslands_Dpm2PerfLevel
typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
-struct PP_SIslands_DPM2Status
-{
+struct PP_SIslands_DPM2Status {
uint32_t dpm2Flags;
uint8_t CurrPSkip;
uint8_t CurrPSkipPowerShift;
@@ -68,8 +66,7 @@ struct PP_SIslands_DPM2Status
typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
-struct PP_SIslands_DPM2Parameters
-{
+struct PP_SIslands_DPM2Parameters {
uint32_t TDPLimit;
uint32_t NearTDPLimit;
uint32_t SafePowerLimit;
@@ -78,8 +75,7 @@ struct PP_SIslands_DPM2Parameters
};
typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
-struct PP_SIslands_PAPMStatus
-{
+struct PP_SIslands_PAPMStatus {
uint32_t EstimatedDGPU_T;
uint32_t EstimatedDGPU_P;
uint32_t EstimatedAPU_T;
@@ -89,8 +85,7 @@ struct PP_SIslands_PAPMStatus
};
typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
-struct PP_SIslands_PAPMParameters
-{
+struct PP_SIslands_PAPMParameters {
uint32_t NearTDPLimitTherm;
uint32_t NearTDPLimitPAPM;
uint32_t PlatformPowerLimit;
@@ -100,8 +95,7 @@ struct PP_SIslands_PAPMParameters
};
typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
-struct SISLANDS_SMC_SCLK_VALUE
-{
+struct SISLANDS_SMC_SCLK_VALUE {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -113,8 +107,7 @@ struct SISLANDS_SMC_SCLK_VALUE
typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
-struct SISLANDS_SMC_MCLK_VALUE
-{
+struct SISLANDS_SMC_MCLK_VALUE {
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
@@ -129,8 +122,7 @@ struct SISLANDS_SMC_MCLK_VALUE
typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
-struct SISLANDS_SMC_VOLTAGE_VALUE
-{
+struct SISLANDS_SMC_VOLTAGE_VALUE {
uint16_t value;
uint8_t index;
uint8_t phase_settings;
@@ -138,8 +130,7 @@ struct SISLANDS_SMC_VOLTAGE_VALUE
typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
-struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL {
uint8_t ACIndex;
uint8_t displayWatermark;
uint8_t gen2PCIE;
@@ -180,8 +171,7 @@ struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-struct SISLANDS_SMC_SWSTATE
-{
+struct SISLANDS_SMC_SWSTATE {
uint8_t flags;
uint8_t levelCount;
uint8_t padding2;
@@ -205,8 +195,7 @@ struct SISLANDS_SMC_SWSTATE_SINGLE {
#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
-struct SISLANDS_SMC_VOLTAGEMASKTABLE
-{
+struct SISLANDS_SMC_VOLTAGEMASKTABLE {
uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
};
@@ -214,8 +203,7 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
#define SISLANDS_MAX_NO_VREG_STEPS 32
-struct SISLANDS_SMC_STATETABLE
-{
+struct SISLANDS_SMC_STATETABLE {
uint8_t thermalProtectType;
uint8_t systemFlags;
uint8_t maxVDDCIndexInPPTable;
@@ -254,8 +242,7 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
-struct PP_SIslands_FanTable
-{
+struct PP_SIslands_FanTable {
uint8_t fdo_mode;
uint8_t padding;
int16_t temp_min;
@@ -285,8 +272,7 @@ typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
#define SMC_SISLANDS_SCALE_I 7
#define SMC_SISLANDS_SCALE_R 12
-struct PP_SIslands_CacConfig
-{
+struct PP_SIslands_CacConfig {
uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
uint32_t lkge_lut_V0;
uint32_t lkge_lut_Vstep;
@@ -308,23 +294,20 @@ typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-struct SMC_SIslands_MCRegisterAddress
-{
+struct SMC_SIslands_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
-struct SMC_SIslands_MCRegisterSet
-{
+struct SMC_SIslands_MCRegisterSet {
uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
-struct SMC_SIslands_MCRegisters
-{
+struct SMC_SIslands_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
@@ -333,8 +316,7 @@ struct SMC_SIslands_MCRegisters
typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
-struct SMC_SIslands_MCArbDramTimingRegisterSet
-{
+struct SMC_SIslands_MCArbDramTimingRegisterSet {
uint32_t mc_arb_dram_timing;
uint32_t mc_arb_dram_timing2;
uint8_t mc_arb_rfsh_rate;
@@ -344,8 +326,7 @@ struct SMC_SIslands_MCArbDramTimingRegisterSet
typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
-struct SMC_SIslands_MCArbDramTimingRegisters
-{
+struct SMC_SIslands_MCArbDramTimingRegisters {
uint8_t arb_current;
uint8_t reserved[3];
SMC_SIslands_MCArbDramTimingRegisterSet data[16];
@@ -353,8 +334,7 @@ struct SMC_SIslands_MCArbDramTimingRegisters
typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
-struct SMC_SISLANDS_SPLL_DIV_TABLE
-{
+struct SMC_SISLANDS_SPLL_DIV_TABLE {
uint32_t freq[256];
uint32_t ss[256];
};
@@ -374,8 +354,7 @@ typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
-struct Smc_SIslands_DTE_Configuration
-{
+struct Smc_SIslands_DTE_Configuration {
uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
uint32_t K;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 11b7b4cffaae..9e4f8a4104a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -26,6 +26,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/reboot.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
@@ -91,6 +92,45 @@ static int pp_early_init(void *handle)
return 0;
}
+static void pp_swctf_delayed_work_handler(struct work_struct *work)
+{
+ struct pp_hwmgr *hwmgr =
+ container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
+ struct amdgpu_device *adev = hwmgr->adev;
+ struct amdgpu_dpm_thermal *range =
+ &adev->pm.dpm.thermal;
+ uint32_t gpu_temperature, size;
+ int ret;
+
+ /*
+ * If the hotspot/edge temperature is confirmed as below SW CTF setting point
+ * after the delay enforced, nothing will be done.
+ * Otherwise, a graceful shutdown will be performed to prevent further damage.
+ */
+ if (range->sw_ctf_threshold &&
+ hwmgr->hwmgr_func->read_sensor) {
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ &gpu_temperature,
+ &size);
+ /*
+ * For some legacy ASICs, hotspot temperature retrieving might be not
+ * supported. Check the edge temperature instead then.
+ */
+ if (ret == -EOPNOTSUPP)
+ ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
+ AMDGPU_PP_SENSOR_EDGE_TEMP,
+ &gpu_temperature,
+ &size);
+ if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
+ return;
+ }
+
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+}
+
static int pp_sw_init(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -101,6 +141,10 @@ static int pp_sw_init(void *handle)
pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
+ if (!ret)
+ INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
+ pp_swctf_delayed_work_handler);
+
return ret;
}
@@ -135,6 +179,8 @@ static int pp_hw_fini(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
+
hwmgr_hw_fini(hwmgr);
return 0;
@@ -221,6 +267,8 @@ static int pp_suspend(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
+
return hwmgr_suspend(hwmgr);
}
@@ -564,7 +612,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
memset(data, 0, sizeof(*data));
- if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
return -EINVAL;
data->nums = hwmgr->num_ps;
@@ -596,7 +644,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
return -EINVAL;
*table = (char *)hwmgr->soft_pp_table;
@@ -954,7 +1002,7 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en ||!limit)
+ if (!hwmgr || !hwmgr->pm_en || !limit)
return -EINVAL;
if (power_type != PP_PWR_TYPE_SUSTAINED)
@@ -999,7 +1047,7 @@ static int pp_get_display_power_level(void *handle,
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en ||!output)
+ if (!hwmgr || !hwmgr->pm_en || !output)
return -EINVAL;
return phm_get_dal_power_level(hwmgr, output);
@@ -1072,7 +1120,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
@@ -1084,7 +1132,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
@@ -1107,7 +1155,7 @@ static int pp_display_clock_voltage_request(void *handle,
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en ||!clock)
+ if (!hwmgr || !hwmgr->pm_en || !clock)
return -EINVAL;
return phm_display_clock_voltage_request(hwmgr, clock);
@@ -1119,7 +1167,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en ||!clocks)
+ if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
clocks->level = PP_DAL_POWERLEVEL_7;
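The amd_powerplay.c hunks above move the SW CTF response off the interrupt path: pp_sw_init() binds a delayed work item, the thermal interrupt (see the smu_helper.c hunk further down) only schedules it with an extra delay, the handler re-reads the hotspot (or, where unsupported, the edge) temperature before committing to an orderly shutdown, and pp_hw_fini()/pp_suspend() cancel the work. A minimal sketch of that workqueue pattern, assuming a hypothetical swctf_ctx owner struct — only the <linux/workqueue.h> calls are the real API, everything else is illustrative:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct swctf_ctx {
	struct delayed_work swctf_work;
};

static void swctf_work_handler(struct work_struct *work)
{
	struct swctf_ctx *ctx =
		container_of(work, struct swctf_ctx, swctf_work.work);

	/* Re-read the temperature here; shut down only if still over range. */
	(void)ctx;
}

static void swctf_init(struct swctf_ctx *ctx)
{
	/* Bind the handler once, at sw_init time. */
	INIT_DELAYED_WORK(&ctx->swctf_work, swctf_work_handler);
}

static void swctf_on_thermal_irq(struct swctf_ctx *ctx, unsigned int delay_ms)
{
	/* Defer the decision instead of powering off in interrupt context. */
	schedule_delayed_work(&ctx->swctf_work, msecs_to_jiffies(delay_ms));
}

static void swctf_teardown(struct swctf_ctx *ctx)
{
	/* Guarantee the handler cannot run after hw_fini/suspend. */
	cancel_delayed_work_sync(&ctx->swctf_work);
}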
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
index 45f608838f6e..65b95d6be5c5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
@@ -38,8 +38,7 @@
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
-static const struct baco_cmd_entry gpio_tbl[] =
-{
+static const struct baco_cmd_entry gpio_tbl[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
@@ -52,15 +51,13 @@ static const struct baco_cmd_entry gpio_tbl[] =
{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
};
-static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
-{
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
};
-static const struct baco_cmd_entry use_bclk_tbl[] =
-{
+static const struct baco_cmd_entry use_bclk_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
@@ -82,8 +79,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] =
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
};
-static const struct baco_cmd_entry turn_off_plls_tbl[] =
-{
+static const struct baco_cmd_entry turn_off_plls_tbl[] = {
{ CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
@@ -120,8 +116,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
};
-static const struct baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct baco_cmd_entry enter_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
@@ -136,8 +131,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] =
#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK
-static const struct baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -152,8 +146,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
-static const struct baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c
index 1c73776bd606..fd79337a3536 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c
@@ -42,7 +42,7 @@ static bool baco_wait_register(struct pp_hwmgr *hwmgr, u32 reg, u32 mask, u32 va
}
static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 mask,
- u32 shift, u32 value, u32 timeout)
+ u32 shift, u32 value, u32 timeout)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
u32 data;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c
index c0368f2dfb21..b3e768fa79f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c
@@ -36,8 +36,7 @@
#include "smu/smu_7_1_3_sh_mask.h"
-static const struct baco_cmd_entry gpio_tbl[] =
-{
+static const struct baco_cmd_entry gpio_tbl[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
@@ -50,15 +49,13 @@ static const struct baco_cmd_entry gpio_tbl[] =
{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
};
-static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
-{
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
};
-static const struct baco_cmd_entry use_bclk_tbl[] =
-{
+static const struct baco_cmd_entry use_bclk_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
@@ -78,8 +75,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
};
-static const struct baco_cmd_entry turn_off_plls_tbl[] =
-{
+static const struct baco_cmd_entry turn_off_plls_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
@@ -88,8 +84,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
};
-static const struct baco_cmd_entry clk_req_b_tbl[] =
-{
+static const struct baco_cmd_entry clk_req_b_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
@@ -104,8 +99,7 @@ static const struct baco_cmd_entry clk_req_b_tbl[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
};
-static const struct baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct baco_cmd_entry enter_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
@@ -122,8 +116,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] =
#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK
-static const struct baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -138,8 +131,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
-static const struct baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 981dc8c7112d..90452b66e107 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -241,7 +241,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
TEMP_RANGE_MAX,
TEMP_RANGE_MIN,
TEMP_RANGE_MAX,
- TEMP_RANGE_MAX};
+ TEMP_RANGE_MAX,
+ 0};
struct amdgpu_device *adev = hwmgr->adev;
if (!hwmgr->not_vf)
@@ -265,6 +266,7 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+ adev->pm.dpm.thermal.sw_ctf_threshold = range.sw_ctf_threshold;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index f2cef0930aa9..2b5ac21fee39 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
@@ -120,7 +120,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case CHIP_TOPAZ:
hwmgr->smumgr_funcs = &iceland_smu_funcs;
topaz_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
hwmgr->od_enabled = false;
@@ -133,7 +133,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case CHIP_FIJI:
hwmgr->smumgr_funcs = &fiji_smu_funcs;
fiji_set_asic_special_caps(hwmgr);
- hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
@@ -195,7 +195,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
- if (!hwmgr|| !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
+ if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
return -EINVAL;
phm_register_irq_handlers(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c
index 8f8e296f2fe9..a6a6d43b09f8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c
@@ -35,8 +35,7 @@
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
-static const struct baco_cmd_entry gpio_tbl[] =
-{
+static const struct baco_cmd_entry gpio_tbl[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
@@ -49,15 +48,13 @@ static const struct baco_cmd_entry gpio_tbl[] =
{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
};
-static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
-{
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
};
-static const struct baco_cmd_entry use_bclk_tbl[] =
-{
+static const struct baco_cmd_entry use_bclk_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
@@ -70,8 +67,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] =
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
};
-static const struct baco_cmd_entry turn_off_plls_tbl[] =
-{
+static const struct baco_cmd_entry turn_off_plls_tbl[] = {
{ CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
{ CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
{ CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
@@ -92,8 +88,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] =
{ CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
};
-static const struct baco_cmd_entry clk_req_b_tbl[] =
-{
+static const struct baco_cmd_entry clk_req_b_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
@@ -108,8 +103,7 @@ static const struct baco_cmd_entry clk_req_b_tbl[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
};
-static const struct baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct baco_cmd_entry enter_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
@@ -126,8 +120,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] =
#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK
-static const struct baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -142,14 +135,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
-static const struct baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
};
-static const struct baco_cmd_entry use_bclk_tbl_vg[] =
-{
+static const struct baco_cmd_entry use_bclk_tbl_vg[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
@@ -160,8 +151,7 @@ static const struct baco_cmd_entry use_bclk_tbl_vg[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
};
-static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
-{
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] = {
{ CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
{ CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
{ CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
index b3103bd4be42..1f987e846628 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
@@ -278,16 +278,14 @@ struct pp_atom_ctrl__avfs_parameters {
uint8_t ucReserved;
};
-struct _AtomCtrl_HiLoLeakageOffsetTable
-{
+struct _AtomCtrl_HiLoLeakageOffsetTable {
USHORT usHiLoLeakageThreshold;
USHORT usEdcDidtLoDpm7TableOffset;
USHORT usEdcDidtHiDpm7TableOffset;
};
typedef struct _AtomCtrl_HiLoLeakageOffsetTable AtomCtrl_HiLoLeakageOffsetTable;
-struct _AtomCtrl_EDCLeakgeTable
-{
+struct _AtomCtrl_EDCLeakgeTable {
ULONG DIDT_REG[24];
};
typedef struct _AtomCtrl_EDCLeakgeTable AtomCtrl_EDCLeakgeTable;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index 2fc1733bcdcf..e86e05c786d9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
@@ -147,8 +147,7 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint8_t ucCoolingID;
};
-struct pp_atomfwctrl_smc_dpm_parameters
-{
+struct pp_atomfwctrl_smc_dpm_parameters {
uint8_t liquid1_i2c_address;
uint8_t liquid2_i2c_address;
uint8_t vr_i2c_address;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
index dac29fe6cfc6..6f54c410c2f9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
@@ -166,7 +166,7 @@ static fInt fNaturalLog(fInt value)
error_term = fAdd(fNegativeOne, value);
- return (fAdd(solution, error_term));
+ return fAdd(solution, error_term);
}
static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
@@ -230,7 +230,7 @@ static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possib
static fInt fNegate(fInt X)
{
fInt CONSTANT_NEGONE = ConvertToFraction(-1);
- return (fMultiply(X, CONSTANT_NEGONE));
+ return fMultiply(X, CONSTANT_NEGONE);
}
static fInt Convert_ULONG_ToFraction(uint32_t X)
@@ -382,14 +382,14 @@ static int ConvertBackToInteger (fInt A) /*THIS is the function that will be use
scaledDecimal.full = uGetScaledDecimal(A);
- fullNumber = fAdd(scaledDecimal,scaledReal);
+ fullNumber = fAdd(scaledDecimal, scaledReal);
return fullNumber.full;
}
static fInt fGetSquare(fInt A)
{
- return fMultiply(A,A);
+ return fMultiply(A, A);
}
/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
@@ -447,7 +447,7 @@ static fInt fSqrt(fInt num)
} while (uAbs(error) > 0);
- return (x_new);
+ return x_new;
}
static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
@@ -459,7 +459,7 @@ static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
f_CONSTANT100 = ConvertToFraction(100);
f_CONSTANT10 = ConvertToFraction(10);
- while(GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) {
+ while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) {
A = fDivide(A, f_CONSTANT10);
B = fDivide(B, f_CONSTANT10);
C = fDivide(C, f_CONSTANT10);
@@ -515,7 +515,7 @@ static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole
dec[i] = tmp / (1 << SHIFT_AMOUNT);
tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]);
tmp *= 10;
- scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 -i);
+ scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i);
}
return scaledDecimal;
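The ppevvmath.h cleanups above touch the fixed-point square-root path, which iterates the Newton step noted in the context comment (x_new = x_old - (x_old^2 - C) / (2 * x_old)) until the error term reaches zero. A plain floating-point illustration of that same iteration, with the file's fInt plumbing deliberately left out (standalone sketch, not driver code):

/* Newton iteration for sqrt(c): x_new = x_old - (x_old^2 - c) / (2 * x_old).
 * For c = 100 and x0 = 100 this walks 100 -> 50.5 -> ~26.2 -> ... -> 10. */
static double sqrt_newton(double c)
{
	double x = c > 1.0 ? c : 1.0;	/* crude initial guess */
	int i;

	for (i = 0; i < 30; i++)
		x -= (x * x - c) / (2.0 * x);
	return x;
}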
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
index b0ac4d121adc..7a31cfa5e7fb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
@@ -419,8 +419,7 @@ typedef struct _ATOM_Fiji_PowerTune_Table {
USHORT usReserved;
} ATOM_Fiji_PowerTune_Table;
-typedef struct _ATOM_Polaris_PowerTune_Table
-{
+typedef struct _ATOM_Polaris_PowerTune_Table {
UCHAR ucRevId;
USHORT usTDP;
USHORT usConfigurableTDP;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index 182118e3fd5f..5794b64507bf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
@@ -1237,7 +1237,7 @@ static int get_vce_clock_voltage_limit_table(struct pp_hwmgr *hwmgr,
const VCEClockInfoArray *array)
{
unsigned long i;
- struct phm_vce_clock_voltage_dependency_table *vce_table = NULL;
+ struct phm_vce_clock_voltage_dependency_table *vce_table;
vce_table = kzalloc(struct_size(vce_table, entries, table->numEntries),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 86d6e88c7386..02ba68d7c654 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -430,37 +430,37 @@ static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
/* temporary hardcoded clock voltage breakdown tables */
-static const DpmClock_t VddDcfClk[]= {
+static const DpmClock_t VddDcfClk[] = {
{ 300, 2600},
{ 600, 3200},
{ 600, 3600},
};
-static const DpmClock_t VddSocClk[]= {
+static const DpmClock_t VddSocClk[] = {
{ 478, 2600},
{ 722, 3200},
{ 722, 3600},
};
-static const DpmClock_t VddFClk[]= {
+static const DpmClock_t VddFClk[] = {
{ 400, 2600},
{1200, 3200},
{1200, 3600},
};
-static const DpmClock_t VddDispClk[]= {
+static const DpmClock_t VddDispClk[] = {
{ 435, 2600},
{ 661, 3200},
{1086, 3600},
};
-static const DpmClock_t VddDppClk[]= {
+static const DpmClock_t VddDppClk[] = {
{ 435, 2600},
{ 661, 3200},
{ 661, 3600},
};
-static const DpmClock_t VddPhyClk[]= {
+static const DpmClock_t VddPhyClk[] = {
{ 540, 2600},
{ 810, 3200},
{ 810, 3600},
@@ -1358,7 +1358,7 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct amdgpu_device *adev = hwmgr->adev;
int i;
- smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
for (i = 0; i < NUM_WM_RANGES; i++)
@@ -1461,7 +1461,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
phm_get_sysfs_buf(&buf, &size);
- size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0],
+ size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n", title[0],
title[1], title[2], title[3], title[4], title[5]);
for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e10cc5e7928e..5a2371484a58 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -83,15 +83,15 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static struct profile_mode_setting smu7_profiling[7] =
- {{0, 0, 0, 0, 0, 0, 0, 0},
+static struct profile_mode_setting smu7_profiling[7] = {
+ {0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
{1, 0, 5, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0},
- };
+};
#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
@@ -904,7 +904,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
dep_sclk_table->entries[i].clk;
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
data->dpm_table.sclk_table.count++;
}
}
@@ -919,7 +919,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
dep_mclk_table->entries[i].clk;
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
data->dpm_table.mclk_table.count++;
}
}
@@ -950,7 +950,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
odn_table->odn_core_clock_dpm_levels.num_of_pl =
data->golden_dpm_table.sclk_table.count;
entries = odn_table->odn_core_clock_dpm_levels.entries;
- for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
+ for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
entries[i].enabled = true;
entries[i].vddc = dep_sclk_table->entries[i].vddc;
@@ -962,7 +962,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
odn_table->odn_memory_clock_dpm_levels.num_of_pl =
data->golden_dpm_table.mclk_table.count;
entries = odn_table->odn_memory_clock_dpm_levels.entries;
- for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
+ for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
entries[i].enabled = true;
entries[i].vddc = dep_mclk_table->entries[i].vddc;
@@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
-static bool intel_core_rkl_chk(void)
-{
-#if IS_ENABLED(CONFIG_X86_64)
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE);
-#else
- return false;
-#endif
-}
-
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1824,18 +1813,19 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+ data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
data->pcie_dpm_key_disabled =
- intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
+ !amdgpu_device_pcie_dynamic_switching_supported() ||
+ !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
/* need to set voltage control types before EVV patching */
data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
@@ -2012,7 +2002,7 @@ static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
} else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
min = 900;
- max= 2100;
+ max = 2100;
} else if (hwmgr->chip_id == CHIP_POLARIS10) {
if (adev->pdev->subsystem_vendor == 0x106B) {
min = 1000;
@@ -4028,7 +4018,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
- AverageGraphicsActivity:
+ AverageGraphicsActivity :
AverageMemoryActivity);
activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
@@ -4049,7 +4039,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
return 0;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
@@ -5432,6 +5422,8 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->sw_ctf_threshold = thermal_data->max;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
index 32a5a00fd8ae..65001bed0a9a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
@@ -520,8 +520,7 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
{ 0xFFFFFFFF }
};
-static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
-{
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -646,7 +645,7 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
@@ -666,8 +665,7 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
-{
+static const struct gpu_pt_config_reg GCCACConfig_VegaM[] = {
// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
// Offset Mask Shift Value Type
// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -703,8 +701,7 @@ static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
{ 0xFFFFFFFF } // End of list
};
-static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
-{
+static const struct gpu_pt_config_reg DIDTConfig_VegaM[] = {
// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
// Offset Mask Shift Value Type
// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -831,7 +828,7 @@ static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
@@ -973,7 +970,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1048,13 +1045,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
}
mutex_unlock(&adev->grbm_idx_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
error:
mutex_unlock(&adev->grbm_idx_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return result;
}
@@ -1068,7 +1065,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0),
@@ -1081,12 +1078,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
error:
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return result;
}
@@ -1103,7 +1100,7 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
- data->cac_enabled = (0 == smc_result) ? true : false;
+ data->cac_enabled = smc_result == 0;
}
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index bfe80ac0ad8c..79a566f3564a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
@@ -603,21 +603,17 @@ int phm_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
- } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+ schedule_delayed_work(&hwmgr->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
- else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+ } else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
/*
* HW CTF just occurred. Shutdown to prevent further damage.
@@ -626,15 +622,10 @@ int phm_irq_process(struct amdgpu_device *adev,
orderly_poweroff(true);
}
} else if (client_id == SOC15_IH_CLIENTID_THM) {
- if (src_id == 0) {
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
- } else
+ if (src_id == 0)
+ schedule_delayed_work(&hwmgr->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
+ else
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
@@ -705,7 +696,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
return -EINVAL);
dep_table->count = allowed_dep_table->count;
- for (i=0; i<dep_table->count; i++) {
+ for (i = 0; i < dep_table->count; i++) {
dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
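Both SW CTF branches above now only queue hwmgr->swctf_delayed_work instead of powering off inline; the shutdown moves into that work item, which runs AMDGPU_SWCTF_EXTRA_DELAY milliseconds later. A minimal sketch of what such a deferred handler is expected to do (the function name and body below are illustrative assumptions, not part of this hunk):

static void example_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;

	/* The extra delay has elapsed with the GPU still over its SW CTF
	 * limit: log and attempt a graceful shutdown, mirroring the
	 * inline code removed above. */
	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}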
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
index 2a75da1e9f03..83b3c9315143 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
@@ -194,7 +194,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
(fieldval) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field) )
+ PHM_FIELD_MASK(reg, field))
#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
index ea743bea8e29..432d4fd2a0ba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
@@ -36,8 +36,7 @@
#include "smu/smu_7_1_2_sh_mask.h"
-static const struct baco_cmd_entry gpio_tbl[] =
-{
+static const struct baco_cmd_entry gpio_tbl[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
@@ -50,15 +49,13 @@ static const struct baco_cmd_entry gpio_tbl[] =
{ CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
};
-static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
-{
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
{ CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
};
-static const struct baco_cmd_entry use_bclk_tbl[] =
-{
+static const struct baco_cmd_entry use_bclk_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
@@ -80,8 +77,7 @@ static const struct baco_cmd_entry use_bclk_tbl[] =
{ CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
};
-static const struct baco_cmd_entry turn_off_plls_tbl[] =
-{
+static const struct baco_cmd_entry turn_off_plls_tbl[] = {
{ CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
@@ -112,8 +108,7 @@ static const struct baco_cmd_entry turn_off_plls_tbl[] =
{ CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
};
-static const struct baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct baco_cmd_entry enter_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
@@ -130,8 +125,7 @@ static const struct baco_cmd_entry enter_baco_tbl[] =
#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK
-static const struct baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -146,22 +140,19 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
-static const struct baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
};
-static const struct baco_cmd_entry gpio_tbl_iceland[] =
-{
+static const struct baco_cmd_entry gpio_tbl_iceland[] = {
{ CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
{ CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
};
-static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
-{
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] = {
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
@@ -177,8 +168,7 @@ static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
-static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
-{
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] = {
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
index 46bb16c29cf6..6836e98d37be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
@@ -31,24 +31,22 @@
-static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry pre_baco_tbl[] = {
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_DOORBELL_CNTL), BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 1},
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_FB_EN), 0, 0, 0, 0},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1}
};
-static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry enter_baco_tbl[] = {
{CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1},
- {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT,0, 1},
+ {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1},
- {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT,0, 1},
+ {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1},
@@ -58,13 +56,12 @@ static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
{CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100}
};
-static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry exit_baco_tbl[] = {
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0},
- {CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10,0},
- {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0,0},
+ {CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0},
+ {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0},
- {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT,0, 0},
+ {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 0},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0},
@@ -74,13 +71,12 @@ static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
{CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0},
{CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0},
- {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK ,BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0},
- {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK , BACO_CNTL__BACO_EN__SHIFT, 0,0},
+ {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0},
+ {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0},
{CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0}
};
-static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0},
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99cd2e63afdd..6d6bc6a380b3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -1375,8 +1375,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dep_mm_table->entries[i].eclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].eclk;
- dpm_table->dpm_levels[dpm_table->count].enabled =
- (i == 0) ? true : false;
+ dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
@@ -1391,8 +1390,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dep_mm_table->entries[i].vclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].vclk;
- dpm_table->dpm_levels[dpm_table->count].enabled =
- (i == 0) ? true : false;
+ dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
@@ -1405,8 +1403,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dep_mm_table->entries[i].dclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].dclk;
- dpm_table->dpm_levels[dpm_table->count].enabled =
- (i == 0) ? true : false;
+ dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
@@ -3969,7 +3966,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
break;
case AMDGPU_PP_SENSOR_VDDGFX:
@@ -5241,6 +5238,9 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
{
struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ struct phm_ppt_v2_information *pp_table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
@@ -5257,6 +5257,13 @@ static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ if (tdp_table->usSoftwareShutdownTemp > pp_table->ThotspotLimit &&
+ tdp_table->usSoftwareShutdownTemp < VEGA10_THERMAL_MAXIMUM_ALERT_TEMP)
+ thermal_data->sw_ctf_threshold = tdp_table->usSoftwareShutdownTemp;
+ else
+ thermal_data->sw_ctf_threshold = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
+ thermal_data->sw_ctf_threshold *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
return 0;
}
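The new Vega10 logic above only trusts the pptable's usSoftwareShutdownTemp when it lies strictly between the hotspot limit and VEGA10_THERMAL_MAXIMUM_ALERT_TEMP, falling back to the alert maximum otherwise, and then scales it like the other thermal_data fields. A standalone restatement of that clamp, with a hypothetical helper name, purely for illustration:

static uint32_t pick_sw_ctf_threshold(uint16_t shutdown_temp,
				       uint16_t hotspot_limit,
				       uint16_t max_alert_temp)
{
	uint32_t threshold = max_alert_temp;

	/* Use the pptable value only when it is a sane trip point. */
	if (shutdown_temp > hotspot_limit && shutdown_temp < max_alert_temp)
		threshold = shutdown_temp;

	/* Same unit conversion as the surrounding thermal_data fields. */
	return threshold * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
}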
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 9757d47dd6b8..3007b054c873 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
@@ -30,8 +30,7 @@
#include "pp_debug.h"
#include "soc15_common.h"
-static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -55,8 +54,7 @@ static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -120,8 +118,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -149,8 +146,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -172,8 +168,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
};
-static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -201,8 +196,7 @@ static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -260,8 +254,7 @@ static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
};
-static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -293,8 +286,7 @@ static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -362,8 +354,7 @@ static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SELCacConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -401,8 +392,7 @@ static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
};
-static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -431,8 +421,7 @@ static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -451,8 +440,7 @@ static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[]
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -478,8 +466,7 @@ static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -492,8 +479,7 @@ static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -514,8 +500,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -536,8 +521,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -571,8 +555,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -586,8 +569,7 @@ static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] =
-{
+static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -601,8 +583,7 @@ static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] =
};
-static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -621,8 +602,7 @@ static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[]
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -651,8 +631,7 @@ static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -673,8 +652,7 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
-{
+static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -695,8 +673,7 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -710,8 +687,7 @@ static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -726,8 +702,7 @@ static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -742,8 +717,7 @@ static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]=
-{
+static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -756,8 +730,7 @@ static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]=
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] =
-{
+static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -915,11 +888,11 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -940,7 +913,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -949,11 +922,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -966,11 +939,11 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -985,7 +958,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1002,11 +975,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1027,11 +1000,11 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1048,7 +1021,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -1057,11 +1030,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -1075,13 +1048,13 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
- data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | (count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1096,7 +1069,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
@@ -1116,11 +1089,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1138,7 +1111,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
int result;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1151,7 +1124,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
index 9c479bd9a786..8b0590b834cc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
@@ -317,16 +317,14 @@ typedef struct _ATOM_Vega10_Thermal_Controller {
UCHAR ucFlags; /* to be defined */
} ATOM_Vega10_Thermal_Controller;
-typedef struct _ATOM_Vega10_VCE_State_Record
-{
+typedef struct _ATOM_Vega10_VCE_State_Record {
UCHAR ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Vega10_MM_Dependency_Table' type */
UCHAR ucFlag; /* 2 bits indicates memory p-states */
UCHAR ucSCLKIndex; /* index into ATOM_Vega10_SCLK_Dependency_Table */
UCHAR ucMCLKIndex; /* index into ATOM_Vega10_MCLK_Dependency_Table */
} ATOM_Vega10_VCE_State_Record;
-typedef struct _ATOM_Vega10_VCE_State_Table
-{
+typedef struct _ATOM_Vega10_VCE_State_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
ATOM_Vega10_VCE_State_Record entries[1];
@@ -361,8 +359,7 @@ typedef struct _ATOM_Vega10_PowerTune_Table {
USHORT usTemperatureLimitTedge;
} ATOM_Vega10_PowerTune_Table;
-typedef struct _ATOM_Vega10_PowerTune_Table_V2
-{
+typedef struct _ATOM_Vega10_PowerTune_Table_V2 {
UCHAR ucRevId;
USHORT usSocketPowerLimit;
USHORT usBatteryPowerLimit;
@@ -388,8 +385,7 @@ typedef struct _ATOM_Vega10_PowerTune_Table_V2
USHORT usTemperatureLimitTedge;
} ATOM_Vega10_PowerTune_Table_V2;
-typedef struct _ATOM_Vega10_PowerTune_Table_V3
-{
+typedef struct _ATOM_Vega10_PowerTune_Table_V3 {
UCHAR ucRevId;
USHORT usSocketPowerLimit;
USHORT usBatteryPowerLimit;
@@ -428,15 +424,13 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record {
USHORT usVddMemLimit;
} ATOM_Vega10_Hard_Limit_Record;
-typedef struct _ATOM_Vega10_Hard_Limit_Table
-{
+typedef struct _ATOM_Vega10_Hard_Limit_Table {
UCHAR ucRevId;
UCHAR ucNumEntries;
ATOM_Vega10_Hard_Limit_Record entries[1];
} ATOM_Vega10_Hard_Limit_Table;
-typedef struct _Vega10_PPTable_Generic_SubTable_Header
-{
+typedef struct _Vega10_PPTable_Generic_SubTable_Header {
UCHAR ucRevId;
} Vega10_PPTable_Generic_SubTable_Header;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
index bb90d8abf79b..3be616af327e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
@@ -372,9 +372,9 @@ static int get_mm_clock_voltage_table(
return 0;
}
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
{
- switch(line){
+ switch (line) {
case Vega10_I2CLineID_DDC1:
*scl = Vega10_I2C_DDC1CLK;
*sda = Vega10_I2C_DDC1DATA;
@@ -954,7 +954,7 @@ static int init_powerplay_extended_tables(
if (!result && powerplay_table->usPixclkDependencyTableOffset)
result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_pixclk,
- (const ATOM_Vega10_PIXCLK_Dependency_Table*)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table *)
pixclk_dep_table);
if (!result && powerplay_table->usPhyClkDependencyTableOffset)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c
index bc53cce4f32d..32cc8de296e4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c
@@ -29,16 +29,14 @@
#include "vega12_ppsmc.h"
#include "vega12_baco.h"
-static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry pre_baco_tbl[] = {
{ CMD_READMODIFYWRITE, NBIF_HWID, 0, mmBIF_DOORBELL_CNTL_BASE_IDX, mmBIF_DOORBELL_CNTL, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 0 },
{ CMD_WRITE, NBIF_HWID, 0, mmBIF_FB_EN_BASE_IDX, mmBIF_FB_EN, 0, 0, 0, 0 },
{ CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1 },
{ CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1 }
};
-static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry enter_baco_tbl[] = {
{ CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000 },
{ CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1 },
{ CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1 },
@@ -56,8 +54,7 @@ static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
{ CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100 }
};
-static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry exit_baco_tbl[] = {
{ CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0 },
{ CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0 },
{ CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0 },
@@ -77,8 +74,7 @@ static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
{ CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0 }
};
-static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{ CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_6_BASE_IDX, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_7_BASE_IDX, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index e9db137cd1c6..460067933de2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -1529,7 +1529,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
if (!ret)
*size = 4;
@@ -1623,13 +1623,13 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
- clock_req.clock_freq_in_khz = min_clocks.dcefClock/10;
+ clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10;
if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR /100,
+ min_clocks.dcefClockInSR / 100,
NULL),
"Attempt to set divider for DCEFCLK Failed!",
return -1);
@@ -2354,8 +2354,8 @@ static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
- !hwmgr->display_config->multi_monitor_in_sync) ||
- vblank_too_short;
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ vblank_too_short;
latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
/* gfxclk */
@@ -2522,7 +2522,7 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
@@ -2763,6 +2763,8 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
@@ -2781,6 +2783,8 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h
index aa63ae41942d..9f2ce4308548 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h
@@ -38,8 +38,7 @@
#define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
#define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4
-enum
-{
+enum {
GNLD_DPM_PREFETCHER = 0,
GNLD_DPM_GFXCLK,
GNLD_DPM_UCLK,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h
index bf4f5095b80d..9b8435a4d306 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h
@@ -72,8 +72,7 @@ enum ATOM_VEGA12_PPCLOCK_ID {
typedef enum ATOM_VEGA12_PPCLOCK_ID ATOM_VEGA12_PPCLOCK_ID;
-typedef struct _ATOM_VEGA12_POWERPLAYTABLE
-{
+typedef struct _ATOM_VEGA12_POWERPLAYTABLE {
struct atom_common_table_header sHeader;
UCHAR ucTableRevision;
USHORT usTableSize;
@@ -92,11 +91,11 @@ typedef struct _ATOM_VEGA12_POWERPLAYTABLE
USHORT usODPowerSavePowerLimit;
USHORT usSoftwareShutdownTemp;
- ULONG PowerSavingClockMax [ATOM_VEGA12_PPCLOCK_COUNT];
- ULONG PowerSavingClockMin [ATOM_VEGA12_PPCLOCK_COUNT];
+ ULONG PowerSavingClockMax[ATOM_VEGA12_PPCLOCK_COUNT];
+ ULONG PowerSavingClockMin[ATOM_VEGA12_PPCLOCK_COUNT];
- ULONG ODSettingsMax [ATOM_VEGA12_ODSETTING_COUNT];
- ULONG ODSettingsMin [ATOM_VEGA12_ODSETTING_COUNT];
+ ULONG ODSettingsMax[ATOM_VEGA12_ODSETTING_COUNT];
+ ULONG ODSettingsMin[ATOM_VEGA12_ODSETTING_COUNT];
USHORT usReserve[5];
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
index ed3dff0b52d2..ae342c58cd3e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
@@ -192,7 +192,9 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
- val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index 8d99c7a5abf8..994c0d374bfa 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
@@ -31,8 +31,7 @@
#include "amdgpu_ras.h"
-static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
-{
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0},
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
@@ -90,11 +89,11 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- if(smum_send_msg_to_smc_with_parameter(hwmgr,
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
} else {
- if(smum_send_msg_to_smc_with_parameter(hwmgr,
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnterBaco, 1, NULL))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 0d4d4811527c..3b33af30eb0f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -1402,7 +1402,7 @@ static int vega20_od8_set_settings(
"Failed to export over drive table!",
return ret);
- switch(index) {
+ switch (index) {
case OD8_SETTING_GFXCLK_FMIN:
od_table.GfxclkFmin = (uint16_t)value;
break;
@@ -2129,7 +2129,7 @@ static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
return ret;
}
-static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
+static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, int idx,
uint32_t *query)
{
int ret = 0;
@@ -2140,10 +2140,17 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
return ret;
/* For the 40.46 release, they changed the value name */
- if (hwmgr->smu_version == 0x282e00)
- *query = metrics_table.AverageSocketPower << 8;
- else
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ if (hwmgr->smu_version == 0x282e00)
+ *query = metrics_table.AverageSocketPower << 8;
+ else
+ ret = -EOPNOTSUPP;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
*query = metrics_table.CurrSocketPower << 8;
+ break;
+ }
return ret;
}
@@ -2253,9 +2260,10 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
*size = 16;
- ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
+ ret = vega20_get_gpu_power(hwmgr, idx, (uint32_t *)value);
break;
case AMDGPU_PP_SENSOR_VDDGFX:
val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
@@ -2360,7 +2368,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
NULL)),
"[SetHardMinFreq] Set hard min uclk failed!",
return ret);
@@ -3579,7 +3587,7 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
@@ -3605,7 +3613,7 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level,
+ (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level,
NULL)),
"[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
return ret);
@@ -3727,8 +3735,8 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
- !hwmgr->display_config->multi_monitor_in_sync) ||
- vblank_too_short;
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ vblank_too_short;
latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
/* gfxclk */
@@ -4206,6 +4214,8 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
@@ -4224,6 +4234,8 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->sw_ctf_threshold = pptable_information->us_software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h
index 075c0094da9c..1ba9b5fe2a5d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h
@@ -385,8 +385,7 @@ struct vega20_odn_data {
struct vega20_odn_temp_table odn_temp_table;
};
-enum OD8_FEATURE_ID
-{
+enum OD8_FEATURE_ID {
OD8_GFXCLK_LIMITS = 1 << 0,
OD8_GFXCLK_CURVE = 1 << 1,
OD8_UCLK_MAX = 1 << 2,
@@ -399,8 +398,7 @@ enum OD8_FEATURE_ID
OD8_FAN_ZERO_RPM_CONTROL = 1 << 9
};
-enum OD8_SETTING_ID
-{
+enum OD8_SETTING_ID {
OD8_SETTING_GFXCLK_FMIN = 0,
OD8_SETTING_GFXCLK_FMAX,
OD8_SETTING_GFXCLK_FREQ1,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h
index 2222e29405c6..b468dddbefff 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h
@@ -73,14 +73,13 @@ enum ATOM_VEGA20_ODSETTING_ID {
};
typedef enum ATOM_VEGA20_ODSETTING_ID ATOM_VEGA20_ODSETTING_ID;
-typedef struct _ATOM_VEGA20_OVERDRIVE8_RECORD
-{
+typedef struct _ATOM_VEGA20_OVERDRIVE8_RECORD {
UCHAR ucODTableRevision;
ULONG ODFeatureCount;
- UCHAR ODFeatureCapabilities [ATOM_VEGA20_ODFEATURE_MAX_COUNT]; //OD feature support flags
+ UCHAR ODFeatureCapabilities[ATOM_VEGA20_ODFEATURE_MAX_COUNT]; //OD feature support flags
ULONG ODSettingCount;
- ULONG ODSettingsMax [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Upper Limit for each OD Setting
- ULONG ODSettingsMin [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Lower Limit for each OD Setting
+ ULONG ODSettingsMax[ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Upper Limit for each OD Setting
+ ULONG ODSettingsMin[ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Lower Limit for each OD Setting
} ATOM_VEGA20_OVERDRIVE8_RECORD;
enum ATOM_VEGA20_PPCLOCK_ID {
@@ -99,16 +98,14 @@ enum ATOM_VEGA20_PPCLOCK_ID {
};
typedef enum ATOM_VEGA20_PPCLOCK_ID ATOM_VEGA20_PPCLOCK_ID;
-typedef struct _ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD
-{
+typedef struct _ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD {
UCHAR ucTableRevision;
ULONG PowerSavingClockCount; // Count of PowerSavingClock Mode
- ULONG PowerSavingClockMax [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Maximum array In MHz
- ULONG PowerSavingClockMin [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Minimum array In MHz
+ ULONG PowerSavingClockMax[ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Maximum array In MHz
+ ULONG PowerSavingClockMin[ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Minimum array In MHz
} ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD;
-typedef struct _ATOM_VEGA20_POWERPLAYTABLE
-{
+typedef struct _ATOM_VEGA20_POWERPLAYTABLE {
struct atom_common_table_header sHeader;
UCHAR ucTableRevision;
USHORT usTableSize;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index f4f4efdbda79..e9737ca8418a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -263,7 +263,9 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
- val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+ val &= ~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+ val &= ~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index 01a7d66864f2..f4f9a104d170 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
@@ -44,8 +44,7 @@ struct phm_fan_speed_info {
};
/* Automatic Power State Throttling */
-enum PHM_AutoThrottleSource
-{
+enum PHM_AutoThrottleSource {
PHM_AutoThrottleSource_Thermal,
PHM_AutoThrottleSource_External
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 5ce433e2c16a..81650727a5de 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -190,8 +190,7 @@ struct phm_vce_clock_voltage_dependency_table {
};
-enum SMU_ASIC_RESET_MODE
-{
+enum SMU_ASIC_RESET_MODE {
SMU_ASIC_RESET_MODE_0,
SMU_ASIC_RESET_MODE_1,
SMU_ASIC_RESET_MODE_2,
@@ -359,7 +358,7 @@ struct pp_hwmgr_func {
int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks);
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
- int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
+ int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool acquire);
int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
@@ -516,7 +515,7 @@ struct phm_vq_budgeting_record {
struct phm_vq_budgeting_table {
uint8_t numEntries;
- struct phm_vq_budgeting_record entries[1];
+ struct phm_vq_budgeting_record entries[0];
};
struct phm_clock_and_voltage_limits {
@@ -607,8 +606,7 @@ struct phm_ppt_v2_information {
uint8_t uc_dcef_dpm_voltage_mode;
};
-struct phm_ppt_v3_information
-{
+struct phm_ppt_v3_information {
uint8_t uc_thermal_controller_type;
uint16_t us_small_power_limit1;
@@ -811,6 +809,8 @@ struct pp_hwmgr {
bool gfxoff_state_changed_by_workload;
uint32_t pstate_sclk_peak;
uint32_t pstate_mclk_peak;
+
+ struct delayed_work swctf_delayed_work;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
index a5f2227a3971..0ffc2347829d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
@@ -131,6 +131,7 @@ struct PP_TemperatureRange {
int mem_min;
int mem_crit_max;
int mem_emergency_max;
+ int sw_ctf_threshold;
};
struct PP_StateValidationBlock {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
index f7c41185097e..2003acc70ca0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
@@ -25,14 +25,12 @@
#include "power_state.h"
-static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] =
-{
+static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
-static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] =
-{
+static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
index e14072d45918..bfce9087a47f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
@@ -101,8 +101,7 @@
#define VR_SMIO_PATTERN_2 4
#define VR_STATIC_VOLTAGE 5
-struct SMU7_PIDController
-{
+struct SMU7_PIDController {
uint32_t Ki;
int32_t LFWindupUL;
int32_t LFWindupLL;
@@ -136,8 +135,7 @@ typedef struct SMU7_PIDController SMU7_PIDController;
#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
-struct SMU7_Firmware_Header
-{
+struct SMU7_Firmware_Header {
uint32_t Digest[5];
uint32_t Version;
uint32_t HeaderSize;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
index 71c9b2d28640..b5f177412769 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
@@ -118,8 +118,7 @@ typedef struct {
#endif
-struct SMU71_PIDController
-{
+struct SMU71_PIDController {
uint32_t Ki;
int32_t LFWindupUpperLim;
int32_t LFWindupLowerLim;
@@ -133,8 +132,7 @@ struct SMU71_PIDController
typedef struct SMU71_PIDController SMU71_PIDController;
-struct SMU7_LocalDpmScoreboard
-{
+struct SMU7_LocalDpmScoreboard {
uint32_t PercentageBusy;
int32_t PIDError;
@@ -179,8 +177,8 @@ struct SMU7_LocalDpmScoreboard
uint8_t DteClampMode;
uint8_t FpsClampMode;
- uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS];
- uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelResidencyCounters[SMU71_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters[SMU71_MAX_LEVELS_GRAPHICS];
void (*TargetStateCalculator)(uint8_t);
void (*SavedTargetStateCalculator)(uint8_t);
@@ -200,8 +198,7 @@ typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
#define SMU7_MAX_VOLTAGE_CLIENTS 12
-struct SMU7_VoltageScoreboard
-{
+struct SMU7_VoltageScoreboard {
uint16_t CurrentVoltage;
uint16_t HighestVoltage;
uint16_t MaxVid;
@@ -325,8 +322,7 @@ typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
// --------------------------------------------------------------------------------------------------
-struct SMU7_ThermalScoreboard
-{
+struct SMU7_ThermalScoreboard {
int16_t GpuLimit;
int16_t GpuHyst;
uint16_t CurrGnbTemp;
@@ -360,8 +356,7 @@ typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
// All 'soft registers' should be uint32_t.
-struct SMU71_SoftRegisters
-{
+struct SMU71_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerPeriod;
uint32_t FeatureEnables;
@@ -413,8 +408,7 @@ struct SMU71_SoftRegisters
typedef struct SMU71_SoftRegisters SMU71_SoftRegisters;
-struct SMU71_Firmware_Header
-{
+struct SMU71_Firmware_Header {
uint32_t Digest[5];
uint32_t Version;
uint32_t HeaderSize;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
index c6b12a4c00db..cf4b2c3c65bc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
@@ -37,8 +37,7 @@ enum Poly3rdOrderCoeff {
POLY_3RD_ORDER_COUNT
};
-struct SMU7_Poly3rdOrder_Data
-{
+struct SMU7_Poly3rdOrder_Data {
int32_t a;
int32_t b;
int32_t c;
@@ -51,8 +50,7 @@ struct SMU7_Poly3rdOrder_Data
typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data;
-struct Power_Calculator_Data
-{
+struct Power_Calculator_Data {
uint16_t NoLoadVoltage;
uint16_t LoadVoltage;
uint16_t Resistance;
@@ -71,8 +69,7 @@ struct Power_Calculator_Data
typedef struct Power_Calculator_Data PowerCalculatorData_t;
-struct Gc_Cac_Weight_Data
-{
+struct Gc_Cac_Weight_Data {
uint8_t index;
uint32_t value;
};
@@ -187,8 +184,7 @@ typedef struct {
#define SMU73_THERMAL_CLAMP_MODE_COUNT 8
-struct SMU7_HystController_Data
-{
+struct SMU7_HystController_Data {
uint16_t waterfall_up;
uint16_t waterfall_down;
uint16_t waterfall_limit;
@@ -199,8 +195,7 @@ struct SMU7_HystController_Data
typedef struct SMU7_HystController_Data SMU7_HystController_Data;
-struct SMU73_PIDController
-{
+struct SMU73_PIDController {
uint32_t Ki;
int32_t LFWindupUpperLim;
int32_t LFWindupLowerLim;
@@ -215,8 +210,7 @@ struct SMU73_PIDController
typedef struct SMU73_PIDController SMU73_PIDController;
-struct SMU7_LocalDpmScoreboard
-{
+struct SMU7_LocalDpmScoreboard {
uint32_t PercentageBusy;
int32_t PIDError;
@@ -261,8 +255,8 @@ struct SMU7_LocalDpmScoreboard
uint8_t DteClampMode;
uint8_t FpsClampMode;
- uint16_t LevelResidencyCounters [SMU73_MAX_LEVELS_GRAPHICS];
- uint16_t LevelSwitchCounters [SMU73_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelResidencyCounters[SMU73_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters[SMU73_MAX_LEVELS_GRAPHICS];
void (*TargetStateCalculator)(uint8_t);
void (*SavedTargetStateCalculator)(uint8_t);
@@ -315,8 +309,7 @@ typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
typedef uint32_t SMU_VoltageLevel;
-struct SMU7_VoltageScoreboard
-{
+struct SMU7_VoltageScoreboard {
SMU_VoltageLevel TargetVoltage;
uint16_t MaxVid;
uint8_t HighestVidOffset;
@@ -354,7 +347,7 @@ struct SMU7_VoltageScoreboard
VoltageChangeHandler_t functionLinks[6];
- uint16_t * VddcFollower1;
+ uint16_t *VddcFollower1;
int16_t Driver_OD_RequestedVidOffset1;
int16_t Driver_OD_RequestedVidOffset2;
@@ -366,8 +359,7 @@ typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
// -------------------------------------------------------------------------------------------------------------------------
#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
-struct SMU7_PCIeLinkSpeedScoreboard
-{
+struct SMU7_PCIeLinkSpeedScoreboard {
uint8_t DpmEnable;
uint8_t DpmRunning;
uint8_t DpmForce;
@@ -396,8 +388,7 @@ typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
#define SMU7_SCALE_I 7
#define SMU7_SCALE_R 12
-struct SMU7_PowerScoreboard
-{
+struct SMU7_PowerScoreboard {
uint32_t GpuPower;
uint32_t VddcPower;
@@ -436,8 +427,7 @@ typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
// All 'soft registers' should be uint32_t.
-struct SMU73_SoftRegisters
-{
+struct SMU73_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerPeriod;
uint32_t FeatureEnables;
@@ -493,8 +483,7 @@ struct SMU73_SoftRegisters
typedef struct SMU73_SoftRegisters SMU73_SoftRegisters;
-struct SMU73_Firmware_Header
-{
+struct SMU73_Firmware_Header {
uint32_t Digest[5];
uint32_t Version;
uint32_t HeaderSize;
@@ -708,9 +697,9 @@ typedef struct VFT_CELL_t VFT_CELL_t;
struct VFT_TABLE_t {
VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
- uint16_t AvfsGbv [NUM_VFT_COLUMNS];
- uint16_t BtcGbv [NUM_VFT_COLUMNS];
- uint16_t Temperature [TEMP_RANGE_MAXSTEPS];
+ uint16_t AvfsGbv[NUM_VFT_COLUMNS];
+ uint16_t BtcGbv[NUM_VFT_COLUMNS];
+ uint16_t Temperature[TEMP_RANGE_MAXSTEPS];
uint8_t NumTemperatureSteps;
uint8_t padding[3];
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
index 5916be08a7fe..fd0964ac465e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
@@ -27,8 +27,7 @@
#pragma pack(push, 1)
-struct SMIO_Pattern
-{
+struct SMIO_Pattern {
uint16_t Voltage;
uint8_t Smio;
uint8_t padding;
@@ -36,8 +35,7 @@ struct SMIO_Pattern
typedef struct SMIO_Pattern SMIO_Pattern;
-struct SMIO_Table
-{
+struct SMIO_Table {
SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
};
@@ -100,8 +98,7 @@ struct SMU73_Discrete_Ulv {
typedef struct SMU73_Discrete_Ulv SMU73_Discrete_Ulv;
-struct SMU73_Discrete_MemoryLevel
-{
+struct SMU73_Discrete_MemoryLevel {
uint32_t MinVoltage;
uint32_t MinMvdd;
@@ -124,10 +121,9 @@ struct SMU73_Discrete_MemoryLevel
typedef struct SMU73_Discrete_MemoryLevel SMU73_Discrete_MemoryLevel;
-struct SMU73_Discrete_LinkLevel
-{
+struct SMU73_Discrete_LinkLevel {
uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
- uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+ uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
uint8_t EnabledForActivity;
uint8_t SPC;
uint32_t DownThreshold;
@@ -139,8 +135,7 @@ typedef struct SMU73_Discrete_LinkLevel SMU73_Discrete_LinkLevel;
// MC ARB DRAM Timing registers.
-struct SMU73_Discrete_MCArbDramTimingTableEntry
-{
+struct SMU73_Discrete_MCArbDramTimingTableEntry {
uint32_t McArbDramTiming;
uint32_t McArbDramTiming2;
uint8_t McArbBurstTime;
@@ -151,16 +146,14 @@ struct SMU73_Discrete_MCArbDramTimingTableEntry
typedef struct SMU73_Discrete_MCArbDramTimingTableEntry SMU73_Discrete_MCArbDramTimingTableEntry;
-struct SMU73_Discrete_MCArbDramTimingTable
-{
+struct SMU73_Discrete_MCArbDramTimingTable {
SMU73_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
};
typedef struct SMU73_Discrete_MCArbDramTimingTable SMU73_Discrete_MCArbDramTimingTable;
// UVD VCLK/DCLK state (level) definition.
-struct SMU73_Discrete_UvdLevel
-{
+struct SMU73_Discrete_UvdLevel {
uint32_t VclkFrequency;
uint32_t DclkFrequency;
uint32_t MinVoltage;
@@ -172,8 +165,7 @@ struct SMU73_Discrete_UvdLevel
typedef struct SMU73_Discrete_UvdLevel SMU73_Discrete_UvdLevel;
// Clocks for other external blocks (VCE, ACP, SAMU).
-struct SMU73_Discrete_ExtClkLevel
-{
+struct SMU73_Discrete_ExtClkLevel {
uint32_t Frequency;
uint32_t MinVoltage;
uint8_t Divider;
@@ -182,8 +174,7 @@ struct SMU73_Discrete_ExtClkLevel
typedef struct SMU73_Discrete_ExtClkLevel SMU73_Discrete_ExtClkLevel;
-struct SMU73_Discrete_StateInfo
-{
+struct SMU73_Discrete_StateInfo {
uint32_t SclkFrequency;
uint32_t MclkFrequency;
uint32_t VclkFrequency;
@@ -206,8 +197,7 @@ struct SMU73_Discrete_StateInfo
typedef struct SMU73_Discrete_StateInfo SMU73_Discrete_StateInfo;
-struct SMU73_Discrete_DpmTable
-{
+struct SMU73_Discrete_DpmTable {
// Multi-DPM controller settings
SMU73_PIDController GraphicsPIDController;
SMU73_PIDController MemoryPIDController;
@@ -225,9 +215,9 @@ struct SMU73_Discrete_DpmTable
uint32_t MvddLevelCount;
- uint8_t BapmVddcVidHiSidd [SMU73_MAX_LEVELS_VDDC];
- uint8_t BapmVddcVidLoSidd [SMU73_MAX_LEVELS_VDDC];
- uint8_t BapmVddcVidHiSidd2 [SMU73_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidHiSidd[SMU73_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidLoSidd[SMU73_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidHiSidd2[SMU73_MAX_LEVELS_VDDC];
uint8_t GraphicsDpmLevelCount;
uint8_t MemoryDpmLevelCount;
@@ -246,19 +236,19 @@ struct SMU73_Discrete_DpmTable
uint32_t Reserved[4];
// State table entries for each DPM state
- SMU73_Discrete_GraphicsLevel GraphicsLevel [SMU73_MAX_LEVELS_GRAPHICS];
+ SMU73_Discrete_GraphicsLevel GraphicsLevel[SMU73_MAX_LEVELS_GRAPHICS];
SMU73_Discrete_MemoryLevel MemoryACPILevel;
- SMU73_Discrete_MemoryLevel MemoryLevel [SMU73_MAX_LEVELS_MEMORY];
- SMU73_Discrete_LinkLevel LinkLevel [SMU73_MAX_LEVELS_LINK];
+ SMU73_Discrete_MemoryLevel MemoryLevel[SMU73_MAX_LEVELS_MEMORY];
+ SMU73_Discrete_LinkLevel LinkLevel[SMU73_MAX_LEVELS_LINK];
SMU73_Discrete_ACPILevel ACPILevel;
- SMU73_Discrete_UvdLevel UvdLevel [SMU73_MAX_LEVELS_UVD];
- SMU73_Discrete_ExtClkLevel VceLevel [SMU73_MAX_LEVELS_VCE];
- SMU73_Discrete_ExtClkLevel AcpLevel [SMU73_MAX_LEVELS_ACP];
- SMU73_Discrete_ExtClkLevel SamuLevel [SMU73_MAX_LEVELS_SAMU];
+ SMU73_Discrete_UvdLevel UvdLevel[SMU73_MAX_LEVELS_UVD];
+ SMU73_Discrete_ExtClkLevel VceLevel[SMU73_MAX_LEVELS_VCE];
+ SMU73_Discrete_ExtClkLevel AcpLevel[SMU73_MAX_LEVELS_ACP];
+ SMU73_Discrete_ExtClkLevel SamuLevel[SMU73_MAX_LEVELS_SAMU];
SMU73_Discrete_Ulv Ulv;
uint32_t SclkStepSize;
- uint32_t Smio [SMU73_MAX_ENTRIES_SMIO];
+ uint32_t Smio[SMU73_MAX_ENTRIES_SMIO];
uint8_t UvdBootLevel;
uint8_t VceBootLevel;
@@ -368,8 +358,7 @@ typedef struct SMU73_Discrete_DpmTable SMU73_Discrete_DpmTable;
// --------------------------------------------------- Fan Table -----------------------------------------------------------
-struct SMU73_Discrete_FanTable
-{
+struct SMU73_Discrete_FanTable {
uint16_t FdoMode;
int16_t TempMin;
int16_t TempMed;
@@ -397,8 +386,7 @@ typedef struct SMU73_Discrete_FanTable SMU73_Discrete_FanTable;
-struct SMU7_MclkDpmScoreboard
-{
+struct SMU7_MclkDpmScoreboard {
uint32_t PercentageBusy;
@@ -448,8 +436,8 @@ struct SMU7_MclkDpmScoreboard
uint8_t VbiWaitCounter;
uint8_t EnabledLevelsChange;
- uint16_t LevelResidencyCounters [SMU73_MAX_LEVELS_MEMORY];
- uint16_t LevelSwitchCounters [SMU73_MAX_LEVELS_MEMORY];
+ uint16_t LevelResidencyCounters[SMU73_MAX_LEVELS_MEMORY];
+ uint16_t LevelSwitchCounters[SMU73_MAX_LEVELS_MEMORY];
void (*TargetStateCalculator)(uint8_t);
void (*SavedTargetStateCalculator)(uint8_t);
@@ -469,8 +457,7 @@ struct SMU7_MclkDpmScoreboard
typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
-struct SMU7_UlvScoreboard
-{
+struct SMU7_UlvScoreboard {
uint8_t EnterUlv;
uint8_t ExitUlv;
uint8_t UlvActive;
@@ -485,8 +472,7 @@ struct SMU7_UlvScoreboard
typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
-struct VddgfxSavedRegisters
-{
+struct VddgfxSavedRegisters {
uint32_t GPU_DBG[3];
uint32_t MEC_BaseAddress_Hi;
uint32_t MEC_BaseAddress_Lo;
@@ -497,8 +483,7 @@ struct VddgfxSavedRegisters
typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
-struct SMU7_VddGfxScoreboard
-{
+struct SMU7_VddGfxScoreboard {
uint8_t VddGfxEnable;
uint8_t VddGfxActive;
uint8_t VPUResetOccured;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
index 771523001533..7d5ed7751976 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
@@ -224,8 +224,8 @@ struct SMU7_LocalDpmScoreboard {
uint8_t DteClampMode;
uint8_t FpsClampMode;
- uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS];
- uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelResidencyCounters[SMU75_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters[SMU75_MAX_LEVELS_GRAPHICS];
void (*TargetStateCalculator)(uint8_t);
void (*SavedTargetStateCalculator)(uint8_t);
@@ -316,7 +316,7 @@ struct SMU7_VoltageScoreboard {
VoltageChangeHandler_t functionLinks[6];
- uint16_t * VddcFollower1;
+ uint16_t *VddcFollower1;
int16_t Driver_OD_RequestedVidOffset1;
int16_t Driver_OD_RequestedVidOffset2;
};
@@ -677,9 +677,9 @@ typedef struct SCS_CELL_t SCS_CELL_t;
struct VFT_TABLE_t {
VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
- uint16_t AvfsGbv [NUM_VFT_COLUMNS];
- uint16_t BtcGbv [NUM_VFT_COLUMNS];
- int16_t Temperature [TEMP_RANGE_MAXSTEPS];
+ uint16_t AvfsGbv[NUM_VFT_COLUMNS];
+ uint16_t BtcGbv[NUM_VFT_COLUMNS];
+ int16_t Temperature[TEMP_RANGE_MAXSTEPS];
#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
SCS_CELL_t ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
index 78ada9ffd508..e130f52fe8d6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
@@ -36,8 +36,7 @@
#define SMU7_NUM_NON_TES 2
// All 'soft registers' should be uint32_t.
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
uint32_t RefClockFrequency;
uint32_t PmTimerP;
uint32_t FeatureEnables;
@@ -80,8 +79,7 @@ struct SMU7_SoftRegisters
typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
-struct SMU7_Fusion_GraphicsLevel
-{
+struct SMU7_Fusion_GraphicsLevel {
uint32_t MinVddNb;
uint32_t SclkFrequency;
@@ -111,8 +109,7 @@ struct SMU7_Fusion_GraphicsLevel
typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
-struct SMU7_Fusion_GIOLevel
-{
+struct SMU7_Fusion_GIOLevel {
uint8_t EnabledForActivity;
uint8_t LclkDid;
uint8_t Vid;
@@ -137,8 +134,7 @@ struct SMU7_Fusion_GIOLevel
typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
// UVD VCLK/DCLK state (level) definition.
-struct SMU7_Fusion_UvdLevel
-{
+struct SMU7_Fusion_UvdLevel {
uint32_t VclkFrequency;
uint32_t DclkFrequency;
uint16_t MinVddNb;
@@ -155,8 +151,7 @@ struct SMU7_Fusion_UvdLevel
typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
// Clocks for other external blocks (VCE, ACP, SAMU).
-struct SMU7_Fusion_ExtClkLevel
-{
+struct SMU7_Fusion_ExtClkLevel {
uint32_t Frequency;
uint16_t MinVoltage;
uint8_t Divider;
@@ -166,8 +161,7 @@ struct SMU7_Fusion_ExtClkLevel
};
typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
-struct SMU7_Fusion_ACPILevel
-{
+struct SMU7_Fusion_ACPILevel {
uint32_t Flags;
uint32_t MinVddNb;
uint32_t SclkFrequency;
@@ -181,8 +175,7 @@ struct SMU7_Fusion_ACPILevel
typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
-struct SMU7_Fusion_NbDpm
-{
+struct SMU7_Fusion_NbDpm {
uint8_t DpmXNbPsHi;
uint8_t DpmXNbPsLo;
uint8_t Dpm0PgNbPsHi;
@@ -197,8 +190,7 @@ struct SMU7_Fusion_NbDpm
typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
-struct SMU7_Fusion_StateInfo
-{
+struct SMU7_Fusion_StateInfo {
uint32_t SclkFrequency;
uint32_t LclkFrequency;
uint32_t VclkFrequency;
@@ -214,8 +206,7 @@ struct SMU7_Fusion_StateInfo
typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
-struct SMU7_Fusion_DpmTable
-{
+struct SMU7_Fusion_DpmTable {
uint32_t SystemFlags;
SMU7_PIDController GraphicsPIDController;
@@ -230,12 +221,12 @@ struct SMU7_Fusion_DpmTable
uint8_t SamuLevelCount;
uint16_t FpsHighT;
- SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_GraphicsLevel GraphicsLevel[SMU__NUM_SCLK_DPM_STATE];
SMU7_Fusion_ACPILevel ACPILevel;
- SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
- SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
- SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
- SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Fusion_UvdLevel UvdLevel[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel[SMU7_MAX_LEVELS_SAMU];
uint8_t UvdBootLevel;
uint8_t VceBootLevel;
@@ -266,10 +257,9 @@ struct SMU7_Fusion_DpmTable
};
-struct SMU7_Fusion_GIODpmTable
-{
+struct SMU7_Fusion_GIODpmTable {
- SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+ SMU7_Fusion_GIOLevel GIOLevel[SMU7_MAX_LEVELS_GIO];
SMU7_PIDController GioPIDController;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
index faae4b918d90..2c69a5694f94 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
@@ -178,20 +178,20 @@ typedef struct {
uint8_t padding8_2[2];
/* SOC Frequencies */
- PllSetting_t GfxclkLevel [NUM_GFXCLK_DPM_LEVELS];
+ PllSetting_t GfxclkLevel[NUM_GFXCLK_DPM_LEVELS];
- uint8_t SocclkDid [NUM_SOCCLK_DPM_LEVELS]; /* DID */
- uint8_t SocDpmVoltageIndex [NUM_SOCCLK_DPM_LEVELS];
+ uint8_t SocclkDid[NUM_SOCCLK_DPM_LEVELS]; /* DID */
+ uint8_t SocDpmVoltageIndex[NUM_SOCCLK_DPM_LEVELS];
- uint8_t VclkDid [NUM_UVD_DPM_LEVELS]; /* DID */
- uint8_t DclkDid [NUM_UVD_DPM_LEVELS]; /* DID */
- uint8_t UvdDpmVoltageIndex [NUM_UVD_DPM_LEVELS];
+ uint8_t VclkDid[NUM_UVD_DPM_LEVELS]; /* DID */
+ uint8_t DclkDid[NUM_UVD_DPM_LEVELS]; /* DID */
+ uint8_t UvdDpmVoltageIndex[NUM_UVD_DPM_LEVELS];
- uint8_t EclkDid [NUM_VCE_DPM_LEVELS]; /* DID */
- uint8_t VceDpmVoltageIndex [NUM_VCE_DPM_LEVELS];
+ uint8_t EclkDid[NUM_VCE_DPM_LEVELS]; /* DID */
+ uint8_t VceDpmVoltageIndex[NUM_VCE_DPM_LEVELS];
- uint8_t Mp0clkDid [NUM_MP0CLK_DPM_LEVELS]; /* DID */
- uint8_t Mp0DpmVoltageIndex [NUM_MP0CLK_DPM_LEVELS];
+ uint8_t Mp0clkDid[NUM_MP0CLK_DPM_LEVELS]; /* DID */
+ uint8_t Mp0DpmVoltageIndex[NUM_MP0CLK_DPM_LEVELS];
DisplayClockTable_t DisplayClockTable[DSPCLK_COUNT][NUM_DSPCLK_LEVELS];
QuadraticInt_t DisplayClock2Gfxclk[DSPCLK_COUNT];
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 4bc8db1be738..9e4228232f02 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -2732,7 +2732,7 @@ static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
static int ci_smu_init(struct pp_hwmgr *hwmgr)
{
- struct ci_smumgr *ci_priv = NULL;
+ struct ci_smumgr *ci_priv;
ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index 02c094a06605..5e43ad2b2956 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -332,7 +332,7 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
static int fiji_smu_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *fiji_priv = NULL;
+ struct fiji_smumgr *fiji_priv;
fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 060fc140c574..97d9802fe673 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -259,7 +259,7 @@ static int iceland_start_smu(struct pp_hwmgr *hwmgr)
static int iceland_smu_init(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *iceland_priv = NULL;
+ struct iceland_smumgr *iceland_priv;
iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index e7ed2a7adf8f..ff6b563ecbf5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -1888,7 +1888,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
- data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ data->apply_avfs_cks_off_voltage = avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1;
}
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index acbe41174d7e..6fe6e6abb5d8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -226,7 +226,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
static int tonga_smu_init(struct pp_hwmgr *hwmgr)
{
- struct tonga_smumgr *tonga_priv = NULL;
+ struct tonga_smumgr *tonga_priv;
tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL);
if (tonga_priv == NULL)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
index 7d024d3facef..34c9f59b889a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
@@ -295,9 +295,8 @@ static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr)
static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
}
static uint32_t vegam_get_mac_definition(uint32_t value)
@@ -1660,7 +1659,7 @@ static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
data->apply_avfs_cks_off_voltage =
- (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1;
}
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 5633c5797e85..f005a90c35af 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/reboot.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -617,7 +618,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
arcturus_set_ppt_funcs(smu);
/* OD is not supported on Arcturus */
- smu->od_enabled =false;
+ smu->od_enabled = false;
break;
case IP_VERSION(13, 0, 2):
aldebaran_set_ppt_funcs(smu);
@@ -733,6 +734,24 @@ static int smu_late_init(void *handle)
return ret;
}
+ /*
+ * Explicitly notify PMFW of the power mode the system is in, since
+ * the PMFW may boot the ASIC with a different mode.
+ * For ASICs supporting AC/DC switch via gpio, PMFW will
+ * handle the switch automatically, so driver involvement
+ * is unnecessary.
+ */
+ if (!smu->dc_controlled_by_gpio) {
+ ret = smu_set_power_source(smu,
+ adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (ret) {
+ dev_err(adev->dev, "Failed to switch to %s mode!\n",
+ adev->pm.ac_power ? "AC" : "DC");
+ return ret;
+ }
+ }
+
if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
return 0;
@@ -804,11 +823,20 @@ static int smu_init_fb_allocations(struct smu_context *smu)
}
}
+ driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
/* VRAM allocation for driver table */
for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
+ /* If one of the tables has a VRAM domain restriction, keep it in
+ * VRAM
+ */
+ if ((tables[i].domain &
+ (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
+ AMDGPU_GEM_DOMAIN_VRAM)
+ driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
if (i == SMU_TABLE_PMSTATUSLOG)
continue;
@@ -818,7 +846,6 @@ static int smu_init_fb_allocations(struct smu_context *smu)
driver_table->size = max_table_size;
driver_table->align = PAGE_SIZE;
- driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
ret = amdgpu_bo_create_kernel(adev,
driver_table->size,
@@ -1052,6 +1079,34 @@ static void smu_interrupt_work_fn(struct work_struct *work)
smu->ppt_funcs->interrupt_work(smu);
}
+static void smu_swctf_delayed_work_handler(struct work_struct *work)
+{
+ struct smu_context *smu =
+ container_of(work, struct smu_context, swctf_delayed_work.work);
+ struct smu_temperature_range *range =
+ &smu->thermal_range;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t hotspot_tmp, size;
+
+ /*
+ * If the hotspot temperature is confirmed to be below the SW CTF setting
+ * point after the enforced delay, nothing will be done.
+ * Otherwise, a graceful shutdown will be performed to prevent further damage.
+ */
+ if (range->software_shutdown_temp &&
+ smu->ppt_funcs->read_sensor &&
+ !smu->ppt_funcs->read_sensor(smu,
+ AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ &hotspot_tmp,
+ &size) &&
+ hotspot_tmp / 1000 < range->software_shutdown_temp)
+ return;
+
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1094,6 +1149,9 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ INIT_DELAYED_WORK(&smu->swctf_delayed_work,
+ smu_swctf_delayed_work_handler);
+
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1523,9 +1581,9 @@ static int smu_disable_dpms(struct smu_context *smu)
/*
* For SMU 13.0.4/11, PMFW will handle the features disablement properly
- * for gpu reset case. Driver involvement is unnecessary.
+ * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
*/
- if (amdgpu_in_reset(adev)) {
+ if (amdgpu_in_reset(adev) || adev->in_s0ix) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
@@ -1574,6 +1632,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return ret;
}
+ cancel_delayed_work_sync(&smu->swctf_delayed_work);
+
ret = smu_disable_dpms(smu);
if (ret) {
dev_err(adev->dev, "Fail to disable dpm features!\n");
@@ -1588,7 +1648,7 @@ static int smu_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
smu_dpm_set_vcn_enable(smu, false);
@@ -1640,7 +1700,7 @@ static int smu_suspend(void *handle)
int ret;
uint64_t count;
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
if (!smu->pm_enabled)
@@ -2157,8 +2217,7 @@ const struct amd_ip_funcs smu_ip_funcs = {
.set_powergating_state = smu_set_powergating_state,
};
-const struct amdgpu_ip_block_version smu_v11_0_ip_block =
-{
+const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 11,
.minor = 0,
@@ -2166,8 +2225,7 @@ const struct amdgpu_ip_block_version smu_v11_0_ip_block =
.funcs = &smu_ip_funcs,
};
-const struct amdgpu_ip_block_version smu_v12_0_ip_block =
-{
+const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 12,
.minor = 0,
@@ -2175,8 +2233,7 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
.funcs = &smu_ip_funcs,
};
-const struct amdgpu_ip_block_version smu_v13_0_ip_block =
-{
+const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SMC,
.major = 13,
.minor = 0,
@@ -2277,7 +2334,7 @@ int smu_get_power_limit(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- switch(pp_power_type) {
+ switch (pp_power_type) {
case PP_PWR_TYPE_SUSTAINED:
limit_type = SMU_DEFAULT_PPT_LIMIT;
break;
@@ -2289,7 +2346,7 @@ int smu_get_power_limit(void *handle,
break;
}
- switch(pp_limit_level){
+ switch (pp_limit_level) {
case PP_PWR_LIMIT_CURRENT:
limit_level = SMU_PPT_LIMIT_CURRENT;
break;
@@ -2535,7 +2592,7 @@ static int smu_read_sensor(void *handle,
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
- *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0: 1;
+ *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@@ -2808,7 +2865,7 @@ static int smu_set_xgmi_pstate(void *handle,
if (smu->ppt_funcs->set_xgmi_pstate)
ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
- if(ret)
+ if (ret)
dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 09469c750a96..95eb8a5eb54f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -200,29 +200,25 @@ struct smu_power_state {
struct smu_hw_power_state hardware;
};
-enum smu_power_src_type
-{
+enum smu_power_src_type {
SMU_POWER_SOURCE_AC,
SMU_POWER_SOURCE_DC,
SMU_POWER_SOURCE_COUNT,
};
-enum smu_ppt_limit_type
-{
+enum smu_ppt_limit_type {
SMU_DEFAULT_PPT_LIMIT = 0,
SMU_FAST_PPT_LIMIT,
};
-enum smu_ppt_limit_level
-{
+enum smu_ppt_limit_level {
SMU_PPT_LIMIT_MIN = -1,
SMU_PPT_LIMIT_CURRENT,
SMU_PPT_LIMIT_DEFAULT,
SMU_PPT_LIMIT_MAX,
};
-enum smu_memory_pool_size
-{
+enum smu_memory_pool_size {
SMU_MEMORY_POOL_SIZE_ZERO = 0,
SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
@@ -282,8 +278,7 @@ struct smu_clock_info {
uint32_t max_bus_bandwidth;
};
-struct smu_bios_boot_up_values
-{
+struct smu_bios_boot_up_values {
uint32_t revision;
uint32_t gfxclk;
uint32_t uclk;
@@ -305,8 +300,7 @@ struct smu_bios_boot_up_values
uint32_t firmware_caps;
};
-enum smu_table_id
-{
+enum smu_table_id {
SMU_TABLE_PPTABLE = 0,
SMU_TABLE_WATERMARKS,
SMU_TABLE_CUSTOM_DPM,
@@ -326,8 +320,7 @@ enum smu_table_id
SMU_TABLE_COUNT,
};
-struct smu_table_context
-{
+struct smu_table_context {
void *power_play_table;
uint32_t power_play_table_size;
void *hardcode_pptable;
@@ -390,8 +383,7 @@ struct smu_power_context {
};
#define SMU_FEATURE_MAX (64)
-struct smu_feature
-{
+struct smu_feature {
uint32_t feature_num;
DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
@@ -416,21 +408,18 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
-enum smu_reset_mode
-{
+enum smu_reset_mode {
SMU_RESET_MODE_0,
SMU_RESET_MODE_1,
SMU_RESET_MODE_2,
};
-enum smu_baco_state
-{
+enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
};
-struct smu_baco_context
-{
+struct smu_baco_context {
uint32_t state;
bool platform_support;
bool maco_support;
@@ -478,8 +467,7 @@ struct stb_context {
#define WORKLOAD_POLICY_MAX 7
-struct smu_context
-{
+struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -573,6 +561,8 @@ struct smu_context
u32 debug_param_reg;
u32 debug_msg_reg;
u32 debug_resp_reg;
+
+ struct delayed_work swctf_delayed_work;
};
struct i2c_adapter;
@@ -1396,6 +1386,7 @@ typedef enum {
METRICS_PCIE_RATE,
METRICS_PCIE_WIDTH,
METRICS_CURR_FANPWM,
+ METRICS_CURR_SOCKETPOWER,
} MetricsMember_t;
enum smu_cmn2asic_mapping_type {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 90200f31ff52..cddf45eebee8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
@@ -24,6 +24,8 @@
#ifndef SMU13_DRIVER_IF_ALDEBARAN_H
#define SMU13_DRIVER_IF_ALDEBARAN_H
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+
#define NUM_VCLK_DPM_LEVELS 8
#define NUM_DCLK_DPM_LEVELS 8
#define NUM_SOCCLK_DPM_LEVELS 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index b686fb68a6e7..9dd1ed5b8940 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,8 +24,10 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
+#define SMU13_0_0_DRIVER_IF_VERSION 0x3D
+
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x26
+#define PPTABLE_VERSION 0x2B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -94,7 +96,7 @@
#define FEATURE_ATHUB_MMHUB_PG_BIT 48
#define FEATURE_SOC_PCC_BIT 49
#define FEATURE_EDC_PWRBRK_BIT 50
-#define FEATURE_SPARE_51_BIT 51
+#define FEATURE_BOMXCO_SVI3_PROG_BIT 51
#define FEATURE_SPARE_52_BIT 52
#define FEATURE_SPARE_53_BIT 53
#define FEATURE_SPARE_54_BIT 54
@@ -310,6 +312,7 @@ typedef enum {
I2C_CONTROLLER_PROTOCOL_VR_IR35217,
I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
I2C_CONTROLLER_PROTOCOL_INA3221,
+ I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
I2C_CONTROLLER_PROTOCOL_COUNT,
} I2cControllerProtocol_e;
@@ -568,6 +571,7 @@ typedef enum {
} POWER_SOURCE_e;
typedef enum {
+ MEM_VENDOR_PLACEHOLDER0,
MEM_VENDOR_SAMSUNG,
MEM_VENDOR_INFINEON,
MEM_VENDOR_ELPIDA,
@@ -577,7 +581,6 @@ typedef enum {
MEM_VENDOR_MOSEL,
MEM_VENDOR_WINBOND,
MEM_VENDOR_ESMT,
- MEM_VENDOR_PLACEHOLDER0,
MEM_VENDOR_PLACEHOLDER1,
MEM_VENDOR_PLACEHOLDER2,
MEM_VENDOR_PLACEHOLDER3,
@@ -665,7 +668,14 @@ typedef enum {
#define PP_NUM_RTAVFS_PWL_ZONES 5
-
+#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
+#define PP_OD_FEATURE_PPT_BIT 2
+#define PP_OD_FEATURE_FAN_CURVE_BIT 3
+#define PP_OD_FEATURE_GFXCLK_BIT 7
+#define PP_OD_FEATURE_UCLK_BIT 8
+#define PP_OD_FEATURE_ZERO_FAN_BIT 9
+#define PP_OD_FEATURE_TEMPERATURE_BIT 10
+#define PP_OD_FEATURE_COUNT 13
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -687,10 +697,8 @@ typedef struct {
//Voltage control
int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
- uint16_t VddGfxVmax; // in mV
- uint8_t IdlePwrSavingFeaturesCtrl;
- uint8_t RuntimePwrSavingFeaturesCtrl;
+ uint32_t Reserved;
//Frequency changes
int16_t GfxclkFmin; // MHz
@@ -727,10 +735,9 @@ typedef struct {
uint32_t FeatureCtrlMask;
int16_t VoltageOffsetPerZoneBoundary;
- uint16_t VddGfxVmax; // in mV
+ uint16_t Reserved1;
- uint8_t IdlePwrSavingFeaturesCtrl;
- uint8_t RuntimePwrSavingFeaturesCtrl;
+ uint16_t Reserved2;
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
@@ -806,6 +813,9 @@ typedef enum {
#define INVALID_BOARD_GPIO 0xFF
+#define MARKETING_BASE_CLOCKS 0
+#define MARKETING_GAME_CLOCKS 1
+#define MARKETING_BOOST_CLOCKS 2
typedef struct {
//PLL 0
@@ -1096,10 +1106,15 @@ typedef struct {
uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase.
uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
+ uint8_t FoptEnabled;
+ uint8_t DcsSpare2[3];
+ uint32_t DcsFoptM; //Tuning parameters to shift Fopt calculation
+ uint32_t DcsFoptB; //Tuning parameters to shift Fopt calculation
- uint32_t DcsSpare[16];
+ uint32_t DcsSpare[11];
// UCLK section
+ uint16_t ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS]; // In MHz
uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
uint8_t PaddingMem[3];
@@ -1245,8 +1260,13 @@ typedef struct {
QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+ uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
+ uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
+ uint16_t TemperatureFwCtfLimit_Hynix;
+ uint16_t TemperatureFwCtfLimit_Micron;
+
// SECTION: Sku Reserved
- uint32_t Spare[43];
+ uint32_t Spare[41];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1318,8 +1338,9 @@ typedef struct {
// UCLK Spread Spectrum
uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT];
+ uint8_t GfxclkSpreadEnable;
+
// FCLK Spread Spectrum
- uint8_t FclkSpreadPadding;
uint8_t FclkSpreadPercent; // Q4.4
uint16_t FclkSpreadFreq; // kHz
@@ -1444,6 +1465,8 @@ typedef struct {
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
+ uint8_t VmaxThrottlingPercentage;
+ uint8_t Padding1[3];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1463,7 +1486,7 @@ typedef struct {
typedef struct {
SmuMetrics_t SmuMetrics;
- uint32_t Spare[30];
+ uint32_t Spare[29];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 2162ecd1057d..fee9293b3f97 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 8
+#define SMU13_0_4_DRIVER_IF_VERSION 8
typedef struct {
int32_t value;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h
index aa971412b434..779c2524806c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h
@@ -23,7 +23,7 @@
#ifndef __SMU13_DRIVER_IF_V13_0_5_H__
#define __SMU13_DRIVER_IF_V13_0_5_H__
-#define PMFW_DRIVER_IF_VERSION 4
+#define SMU13_0_5_DRIVER_IF_VERSION 5
// Throttler Status Bitmask
#define THROTTLER_STATUS_BIT_SPL 0
@@ -103,7 +103,6 @@ typedef struct {
uint16_t ThrottlerStatus;
uint16_t CurrentSocketPower; //[mW]
- uint16_t spare1;
} SmuMetrics_t;
//Freq in MHz
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
index be596777cd2c..ca4a5e99ccd1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h
@@ -26,7 +26,7 @@
// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version if
// anything is changed in this file
-#define SMU13_0_6_DRIVER_IF_VERSION 0x08042022
+#define SMU13_0_6_DRIVER_IF_VERSION 0x08042024
//I2C Interface
#define NUM_I2C_CONTROLLERS 8
@@ -106,7 +106,7 @@ typedef enum {
} UCLK_DPM_MODE_e;
typedef struct {
- //0-26 SOC, 27-29 SOCIO
+ //0-23 SOC, 24-26 SOCIO, 27-29 SOC
uint16_t avgPsmCount[30];
uint16_t minPsmCount[30];
float avgPsmVoltage[30];
@@ -121,6 +121,17 @@ typedef struct {
float minPsmVoltage[30];
} AvfsDebugTableXcd_t;
+// Defines used for IH-based thermal interrupts to GFX driver - A/X only
+#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
+#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+
+//thermal over-temp mask defines for IH interrupt to host
+#define THROTTLER_PROCHOT_BIT 0
+#define THROTTLER_PPT_BIT 1
+#define THROTTLER_THERMAL_SOCKET_BIT 2//AID, XCD, CCD throttling
+#define THROTTLER_THERMAL_VR_BIT 3//VRHOT
+#define THROTTLER_THERMAL_HBM_BIT 4
+
// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index 4c46a0392451..62b7c0daff68 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -25,7 +25,7 @@
// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x35
+#define SMU13_0_7_DRIVER_IF_VERSION 0x35
//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x27
@@ -683,18 +683,12 @@ typedef struct {
#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
-#define PP_OD_FEATURE_VMAX_BIT 1
#define PP_OD_FEATURE_PPT_BIT 2
#define PP_OD_FEATURE_FAN_CURVE_BIT 3
-#define PP_OD_FEATURE_FREQ_DETER_BIT 4
-#define PP_OD_FEATURE_FULL_CTRL_BIT 5
-#define PP_OD_FEATURE_TDC_BIT 6
#define PP_OD_FEATURE_GFXCLK_BIT 7
#define PP_OD_FEATURE_UCLK_BIT 8
#define PP_OD_FEATURE_ZERO_FAN_BIT 9
#define PP_OD_FEATURE_TEMPERATURE_BIT 10
-#define PP_OD_FEATURE_POWER_FEATURE_CTRL_BIT 11
-#define PP_OD_FEATURE_ASIC_TDC_BIT 12
#define PP_OD_FEATURE_COUNT 13
typedef enum {
@@ -713,10 +707,8 @@ typedef struct {
//Voltage control
int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
- uint16_t VddGfxVmax; // in mV
- uint8_t IdlePwrSavingFeaturesCtrl;
- uint8_t RuntimePwrSavingFeaturesCtrl;
+ uint32_t Reserved;
//Frequency changes
int16_t GfxclkFmin; // MHz
@@ -741,12 +733,7 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t Padding[4];
- uint16_t GfxVoltageFullCtrlMode;
- uint16_t GfxclkFullCtrlMode;
- uint16_t UclkFullCtrlMode;
- int16_t AsicTdc;
-
- uint32_t Spare[10];
+ uint32_t Spare[12];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -759,10 +746,9 @@ typedef struct {
uint32_t FeatureCtrlMask;
int16_t VoltageOffsetPerZoneBoundary;
- uint16_t VddGfxVmax; // in mV
+ uint16_t Reserved1;
- uint8_t IdlePwrSavingFeaturesCtrl;
- uint8_t RuntimePwrSavingFeaturesCtrl;
+ uint16_t Reserved2;
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
@@ -785,12 +771,7 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t Padding[4];
- uint16_t GfxVoltageFullCtrlMode;
- uint16_t GfxclkFullCtrlMode;
- uint16_t UclkFullCtrlMode;
- int16_t AsicTdc;
-
- uint32_t Spare[10];
+ uint32_t Spare[12];
} OverDriveLimits_t;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
index 25540cb28208..7417634827ad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
@@ -26,7 +26,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU13_DRIVER_IF_VERSION 4
+#define SMU13_YELLOW_CARP_DRIVER_IF_VERSION 4
typedef struct {
int32_t value;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index bdccbb4a6276..252aef190c5c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,9 +123,9 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x1
+#define SMU_METRICS_TABLE_VERSION 0x5
-typedef struct {
+typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
//TEMPERATURE
@@ -198,11 +198,20 @@ typedef struct {
uint32_t SocketThmResidencyAcc;
uint32_t VrThmResidencyAcc;
uint32_t HbmThmResidencyAcc;
+ uint32_t spare;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+ uint64_t PublicSerialNumber_CCD[12];
} MetricsTable_t;
-#define SMU_VF_METRICS_TABLE_VERSION 0x1
+#define SMU_VF_METRICS_TABLE_VERSION 0x3
-typedef struct {
+typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index b838e8db395a..ae4f44c4b877 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -82,7 +82,8 @@
#define PPSMC_MSG_SetSoftMaxGfxClk 0x31
#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
-#define PPSMC_Message_Count 0x34
+#define PPSMC_MSG_PrepareForDriverUnload 0x34
+#define PPSMC_Message_Count 0x35
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
index beab6d7b28b7..630132c4a76b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
@@ -52,8 +52,7 @@ static unsigned int DbiPrbs7[] =
//4096 bytes, 256 byte aligned
-static unsigned int NoDbiPrbs7[] =
-{
+static unsigned int NoDbiPrbs7[] = {
0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
@@ -121,8 +120,7 @@ static unsigned int NoDbiPrbs7[] =
};
// 4096 bytes, 256 byte aligned
-static unsigned int DbiPrbs7[] =
-{
+static unsigned int DbiPrbs7[] = {
0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index d466db6f0ad4..a0e5ad0381d6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -64,11 +64,9 @@
#define LINK_SPEED_MAX 3
static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
static const
-struct smu_temperature_range __maybe_unused smu11_thermal_policy[] =
-{
+struct smu_temperature_range __maybe_unused smu11_thermal_policy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
@@ -96,8 +94,8 @@ struct smu_11_0_dpm_table {
};
struct smu_11_0_pcie_table {
- uint8_t pcie_gen[MAX_PCIE_CONF];
- uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
};
struct smu_11_0_dpm_tables {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
index 0116e3d04fad..df7430876e0c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
@@ -101,8 +101,7 @@ enum SMU_11_0_ODSETTING_ID {
};
#define SMU_11_0_MAX_ODSETTING 32 //Maximum Number of ODSettings
-struct smu_11_0_overdrive_table
-{
+struct smu_11_0_overdrive_table {
uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION
uint8_t reserve[3]; //Zero filled field reserved for future use
uint32_t feature_count; //Total number of supported features
@@ -127,8 +126,7 @@ enum SMU_11_0_PPCLOCK_ID {
};
#define SMU_11_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
-struct smu_11_0_power_saving_clock_table
-{
+struct smu_11_0_power_saving_clock_table {
uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION
uint8_t reserve[3]; //Zero filled field reserved for future use
uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
@@ -136,8 +134,7 @@ struct smu_11_0_power_saving_clock_table
uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
};
-struct smu_11_0_powerplay_table
-{
+struct smu_11_0_powerplay_table {
struct atom_common_table_header header;
uint8_t table_revision;
uint16_t table_size; //Driver portion table size. The offset to smc_pptable including header size
@@ -145,14 +142,14 @@ struct smu_11_0_powerplay_table
uint32_t golden_revision;
uint16_t format_id;
uint32_t platform_caps; //POWERPLAYABLE::ulPlatformCaps
-
+
uint8_t thermal_controller_type; //one of SMU_11_0_PP_THERMALCONTROLLER
uint16_t small_power_limit1;
uint16_t small_power_limit2;
uint16_t boost_power_limit;
- uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning.
- uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning.
+ uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning.
+ uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning.
uint16_t software_shutdown_temp;
uint16_t reserve[6]; //Zero filled field reserved for future use
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index df3baaab0037..355c156d871a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -25,17 +25,6 @@
#include "amdgpu_smu.h"
-#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_6 0x0
-
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
/* MP Apertures */
@@ -62,6 +51,8 @@
#define CTF_OFFSET_HOTSPOT 5
#define CTF_OFFSET_MEM 5
+#define SMU_13_VCLK_SHIFT 16
+
extern const int pmfw_decoded_link_speed[5];
extern const int pmfw_decoded_link_width[7];
@@ -130,6 +121,7 @@ struct smu_13_0_power_context {
uint32_t power_source;
uint8_t in_power_limit_boost_mode;
enum smu_13_0_power_state power_state;
+ atomic_t throttle_status;
};
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
@@ -303,5 +295,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
uint32_t *size,
uint32_t pptable_id);
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
index 478862ded0bd..eb694f9f556d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h
@@ -38,13 +38,11 @@
#define SMU_13_0_7_PP_THERMALCONTROLLER_NONE 0
#define SMU_13_0_7_PP_THERMALCONTROLLER_NAVI21 28
-#define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x81 // OverDrive 8 Table Version 0.2
+#define SMU_13_0_7_PP_OVERDRIVE_VERSION 0x83 // OverDrive 8 Table Version 0.2
#define SMU_13_0_7_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
-enum SMU_13_0_7_ODFEATURE_CAP
-{
+enum SMU_13_0_7_ODFEATURE_CAP {
SMU_13_0_7_ODCAP_GFXCLK_LIMITS = 0,
- SMU_13_0_7_ODCAP_GFXCLK_CURVE,
SMU_13_0_7_ODCAP_UCLK_LIMITS,
SMU_13_0_7_ODCAP_POWER_LIMIT,
SMU_13_0_7_ODCAP_FAN_ACOUSTIC_LIMIT,
@@ -59,13 +57,12 @@ enum SMU_13_0_7_ODFEATURE_CAP
SMU_13_0_7_ODCAP_FAN_CURVE,
SMU_13_0_7_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT,
SMU_13_0_7_ODCAP_POWER_MODE,
+ SMU_13_0_7_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET,
SMU_13_0_7_ODCAP_COUNT,
};
-enum SMU_13_0_7_ODFEATURE_ID
-{
+enum SMU_13_0_7_ODFEATURE_ID {
SMU_13_0_7_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
- SMU_13_0_7_ODFEATURE_GFXCLK_CURVE = 1 << SMU_13_0_7_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
SMU_13_0_7_ODFEATURE_UCLK_LIMITS = 1 << SMU_13_0_7_ODCAP_UCLK_LIMITS, //UCLK Limit feature
SMU_13_0_7_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_7_ODCAP_POWER_LIMIT, //Power Limit feature
SMU_13_0_7_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_7_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
@@ -80,19 +77,15 @@ enum SMU_13_0_7_ODFEATURE_ID
SMU_13_0_7_ODFEATURE_FAN_CURVE = 1 << SMU_13_0_7_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_13_0_7_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_7_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, //Auto Fan Acoustic RPM feature
SMU_13_0_7_ODFEATURE_POWER_MODE = 1 << SMU_13_0_7_ODCAP_POWER_MODE, //Optimized GPU Power Mode feature
+ SMU_13_0_7_ODFEATURE_PER_ZONE_GFX_VOLTAGE_OFFSET = 1 << SMU_13_0_7_ODCAP_PER_ZONE_GFX_VOLTAGE_OFFSET, //Perzone voltage offset feature
SMU_13_0_7_ODFEATURE_COUNT = 16,
};
#define SMU_13_0_7_MAX_ODFEATURE 32 //Maximum Number of OD Features
-enum SMU_13_0_7_ODSETTING_ID
-{
+enum SMU_13_0_7_ODSETTING_ID {
SMU_13_0_7_ODSETTING_GFXCLKFMAX = 0,
SMU_13_0_7_ODSETTING_GFXCLKFMIN,
- SMU_13_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_A,
- SMU_13_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_B,
- SMU_13_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_C,
- SMU_13_0_7_ODSETTING_CUSTOM_CURVE_VFT_FMIN,
SMU_13_0_7_ODSETTING_UCLKFMIN,
SMU_13_0_7_ODSETTING_UCLKFMAX,
SMU_13_0_7_ODSETTING_POWERPERCENTAGE,
@@ -117,12 +110,17 @@ enum SMU_13_0_7_ODSETTING_ID
SMU_13_0_7_ODSETTING_FAN_CURVE_SPEED_5,
SMU_13_0_7_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT,
SMU_13_0_7_ODSETTING_POWER_MODE,
+ SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_1,
+ SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_2,
+ SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_3,
+ SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_4,
+ SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_5,
+ SMU_13_0_7_ODSETTING_PER_ZONE_GFX_VOLTAGE_OFFSET_POINT_6,
SMU_13_0_7_ODSETTING_COUNT,
};
#define SMU_13_0_7_MAX_ODSETTING 64 //Maximum Number of ODSettings
-enum SMU_13_0_7_PWRMODE_SETTING
-{
+enum SMU_13_0_7_PWRMODE_SETTING {
SMU_13_0_7_PMSETTING_POWER_LIMIT_QUIET = 0,
SMU_13_0_7_PMSETTING_POWER_LIMIT_BALANCE,
SMU_13_0_7_PMSETTING_POWER_LIMIT_TURBO,
@@ -142,8 +140,7 @@ enum SMU_13_0_7_PWRMODE_SETTING
};
#define SMU_13_0_7_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings
-struct smu_13_0_7_overdrive_table
-{
+struct smu_13_0_7_overdrive_table {
uint8_t revision; //Revision = SMU_13_0_7_PP_OVERDRIVE_VERSION
uint8_t reserve[3]; //Zero filled field reserved for future use
uint32_t feature_count; //Total number of supported features
@@ -154,8 +151,7 @@ struct smu_13_0_7_overdrive_table
int16_t pm_setting[SMU_13_0_7_MAX_PMSETTING]; //Optimized power mode feature settings
};
-enum SMU_13_0_7_PPCLOCK_ID
-{
+enum SMU_13_0_7_PPCLOCK_ID {
SMU_13_0_7_PPCLOCK_GFXCLK = 0,
SMU_13_0_7_PPCLOCK_SOCCLK,
SMU_13_0_7_PPCLOCK_UCLK,
@@ -173,8 +169,7 @@ enum SMU_13_0_7_PPCLOCK_ID
};
#define SMU_13_0_7_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
-struct smu_13_0_7_powerplay_table
-{
+struct smu_13_0_7_powerplay_table {
struct atom_common_table_header header; //For PLUM_BONITO, header.format_revision = 15, header.content_revision = 0
uint8_t table_revision; //For PLUM_BONITO, table_revision = 2
uint8_t padding;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 9cd005131f56..704a2b577a0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -598,7 +598,7 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
@@ -1130,7 +1130,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = arcturus_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
@@ -1169,6 +1169,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -1482,7 +1483,7 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return ret;
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
- (smu_version >=0x360d00)) {
+ (smu_version >= 0x360d00)) {
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -2113,7 +2114,6 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -2130,6 +2130,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
}
r = num_msgs;
fail:
+ mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index ca4d97b7f576..9548bd3c624b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -154,10 +154,14 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
case METRICS_CURR_UCLK:
*value = metrics->Current.MemclkFrequency;
break;
- case METRICS_AVERAGE_SOCKETPOWER:
+ case METRICS_CURR_SOCKETPOWER:
*value = (metrics->Current.CurrentSocketPower << 8) /
1000;
break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Average.CurrentSocketPower << 8) /
+ 1000;
+ break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->Current.GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
@@ -208,12 +212,18 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
*(uint32_t *)data *= 100;
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ ret = cyan_skillfish_get_smu_metrics_data(smu,
+ METRICS_CURR_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index c4000518dc56..18487ae10bcf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -136,7 +136,7 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
- MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange, 0),
+ MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange, 0),
MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0),
MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0),
MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
@@ -556,7 +556,7 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_legacy_t *metrics =
(SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
@@ -642,7 +642,7 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_t *metrics =
(SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
@@ -731,7 +731,7 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_NV12_legacy_t *metrics =
(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
int ret = 0;
@@ -817,7 +817,7 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_NV12_t *metrics =
(SmuMetrics_NV12_t *)smu_table->metrics_table;
int ret = 0;
@@ -1654,7 +1654,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
- int ret = 0, size = 0;
+ int ret = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
@@ -1675,25 +1675,25 @@ static int navi10_force_clk_levels(struct smu_context *smu,
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
- return size;
+ return 0;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
if (ret)
- return size;
+ return 0;
ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
- return size;
+ return 0;
break;
case SMU_DCEFCLK:
- dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
+ dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
break;
default:
break;
}
- return size;
+ return 0;
}
static int navi10_populate_umd_state_clk(struct smu_context *smu)
@@ -2182,7 +2182,7 @@ static int navi10_read_sensor(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
- if(!data || !size)
+ if (!data || !size)
return -EINVAL;
switch (sensor) {
@@ -2202,7 +2202,7 @@ static int navi10_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = navi1x_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
@@ -2240,6 +2240,7 @@ static int navi10_read_sensor(struct smu_context *smu,
ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -2317,15 +2318,15 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
uint32_t max_memory_clock = max_sustainable_clocks->uclock;
- if(smu->disable_uclk_switch == disable_memory_clock_switch)
+ if (smu->disable_uclk_switch == disable_memory_clock_switch)
return 0;
- if(disable_memory_clock_switch)
+ if (disable_memory_clock_switch)
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
else
ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);
- if(!ret)
+ if (!ret)
smu->disable_uclk_switch = disable_memory_clock_switch;
return ret;
@@ -2559,7 +2560,8 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
return 0;
}
-static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
+{
int i;
int ret = 0;
struct smu_table_context *table_context = &smu->smu_table;
@@ -3021,7 +3023,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -3038,6 +3039,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
}
r = num_msgs;
fail:
+ mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
@@ -3368,7 +3370,7 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
ret = navi10_get_gpu_metrics(smu, table);
else
- ret =navi10_get_legacy_gpu_metrics(smu, table);
+ ret = navi10_get_legacy_gpu_metrics(smu, table);
break;
}
@@ -3413,26 +3415,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
return 0;
ret = navi10_run_umc_cdr_workaround(smu);
- if (ret) {
+ if (ret)
dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
- return ret;
- }
-
- if (!smu->dc_controlled_by_gpio) {
- /*
- * For Navi1X, manually switch it to AC mode as PMFW
- * may boot it with DC mode.
- */
- ret = smu_v11_0_set_power_source(smu,
- adev->pm.ac_power ?
- SMU_POWER_SOURCE_AC :
- SMU_POWER_SOURCE_DC);
- if (ret) {
- dev_err(adev->dev, "Failed to switch to %s mode!\n",
- adev->pm.ac_power ? "AC" : "DC");
- return ret;
- }
- }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 75f18681e984..4bb289f9b4b8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -431,7 +431,13 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
{
struct atom_smc_dpm_info_v4_9 *smc_dpm_table;
int index, ret;
- I2cControllerConfig_t *table_member;
+ PPTable_beige_goby_t *ppt_beige_goby;
+ PPTable_t *ppt;
+
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
+ ppt_beige_goby = smu->smu_table.driver_pptable;
+ else
+ ppt = smu->smu_table.driver_pptable;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
smc_dpm_info);
@@ -440,9 +446,13 @@ static int sienna_cichlid_append_powerplay_table(struct smu_context *smu)
(uint8_t **)&smc_dpm_table);
if (ret)
return ret;
- GET_PPTABLE_MEMBER(I2cControllers, &table_member);
- memcpy(table_member, smc_dpm_table->I2cControllers,
- sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
+ smu_memcpy_trailing(ppt_beige_goby, I2cControllers, BoardReserved,
+ smc_dpm_table, I2cControllers);
+ else
+ smu_memcpy_trailing(ppt, I2cControllers, BoardReserved,
+ smc_dpm_table, I2cControllers);
return 0;
}
@@ -578,7 +588,9 @@ err0_out:
return -ENOMEM;
}
-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
+static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
+ bool use_metrics_v3,
+ bool use_metrics_v2)
{
struct smu_table_context *smu_table= &smu->smu_table;
SmuMetricsExternal_t *metrics_ext =
@@ -586,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
uint32_t throttler_status = 0;
int i;
- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4900)) {
+ if (use_metrics_v3) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
- } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300)) {
+ } else if (use_metrics_v2) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
@@ -854,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
- *value = sienna_cichlid_get_throttler_status_locked(smu);
+ *value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
break;
case METRICS_CURR_FANSPEED:
*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
@@ -1892,7 +1902,7 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = sienna_cichlid_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
@@ -1917,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
+ ret = sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
+ (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
+ ret = sienna_cichlid_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXCLK,
+ (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
@@ -1948,6 +1962,7 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
}
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -2072,28 +2087,36 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
- uint32_t smu_pcie_arg;
- uint8_t *table_member1, *table_member2;
+ struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+ u32 smu_pcie_arg;
int ret, i;
- GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
- GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+ /* PCIE gen speed and lane width override */
+ if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+ if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
+
+ if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
- /* lclk dpm table setup */
- for (i = 0; i < MAX_PCIE_CONF; i++) {
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+ /* Force all levels to use the same settings */
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ }
+ } else {
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ if (pcie_table->pcie_lane[i] > pcie_width_cap)
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ }
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
- smu_pcie_arg = (i << 16) |
- ((table_member1[i] <= pcie_gen_cap) ?
- (table_member1[i] << 8) :
- (pcie_gen_cap << 8)) |
- ((table_member2[i] <= pcie_width_cap) ?
- table_member2[i] :
- pcie_width_cap);
+ smu_pcie_arg = (i << 16 |
+ pcie_table->pcie_gen[i] << 8 |
+ pcie_table->pcie_lane[i]);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2124,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
NULL);
if (ret)
return ret;
-
- if (table_member1[i] > pcie_gen_cap)
- dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
- if (table_member2[i] > pcie_width_cap)
- dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
}
return 0;
@@ -3776,7 +3794,6 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -3793,6 +3810,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
}
r = num_msgs;
fail:
+ mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
@@ -4000,7 +4018,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
- gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
+ gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
gpu_metrics->indep_throttle_status =
smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
sienna_cichlid_throttler_map);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index e1ef88ee1ed3..aa4a5498a12f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1412,13 +1412,8 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7433dcaa16e0..201cec599842 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -390,6 +390,10 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->Current.UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Average.CurrentSocketPower << 8) /
+ 1000;
+ break;
+ case METRICS_CURR_SOCKETPOWER:
*value = (metrics->Current.CurrentSocketPower << 8) /
1000;
break;
@@ -582,7 +586,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -656,7 +660,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_FCLK:
for (i = 0; i < count; i++) {
- ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
@@ -683,7 +688,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@@ -765,7 +770,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
case SMU_MCLK:
case SMU_FCLK:
for (i = 0; i < count; i++) {
- ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
@@ -1534,12 +1540,18 @@ static int vangogh_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
@@ -1852,6 +1864,86 @@ static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_3);
}
+static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu,
+ void **table)
+{
+ SmuMetrics_t metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_4 *gpu_metrics =
+ (struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
+ gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
+ memcpy(&gpu_metrics->average_temperature_core[0],
+ &metrics.Average.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+
+ gpu_metrics->average_cpu_voltage = metrics.Current.Voltage[0];
+ gpu_metrics->average_soc_voltage = metrics.Current.Voltage[1];
+ gpu_metrics->average_gfx_voltage = metrics.Current.Voltage[2];
+
+ gpu_metrics->average_cpu_current = metrics.Current.Current[0];
+ gpu_metrics->average_soc_current = metrics.Current.Current[1];
+ gpu_metrics->average_gfx_current = metrics.Current.Current[2];
+
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_4);
+}
+
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1921,23 +2013,34 @@ static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
{
uint32_t if_version;
uint32_t smu_version;
+ uint32_t smu_program;
+ uint32_t fw_version;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
- if (ret) {
+ if (ret)
return ret;
- }
- if (smu_version >= 0x043F3E00) {
- if (if_version < 0x3)
- ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+ smu_program = (smu_version >> 24) & 0xff;
+ fw_version = smu_version & 0xffffff;
+ if (smu_program == 6) {
+ if (fw_version >= 0x3F0800)
+ ret = vangogh_get_gpu_metrics_v2_4(smu, table);
else
ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+
} else {
- if (if_version < 0x3)
- ret = vangogh_get_legacy_gpu_metrics(smu, table);
- else
- ret = vangogh_get_gpu_metrics(smu, table);
+ if (smu_version >= 0x043F3E00) {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+ } else {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+ }
}
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 5cdc07165480..c8119491c516 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -262,15 +262,15 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu,
/* mclk levels are in reverse order */
*mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- if(sclk_mask)
+ if (sclk_mask)
/* The sclk as gfxclk and has three level about max/min/current */
*sclk_mask = 3 - 1;
- if(mclk_mask)
+ if (mclk_mask)
/* mclk levels are in reverse order */
*mclk_mask = 0;
- if(soc_mask)
+ if (soc_mask)
*soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
}
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_VCLK:
case SMU_DCLK:
for (i = 0; i < count; i++) {
- ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
if (ret)
return ret;
if (!value)
@@ -1196,7 +1197,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->AverageUvdActivity / 100;
break;
- case METRICS_AVERAGE_SOCKETPOWER:
+ case METRICS_CURR_SOCKETPOWER:
if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
*value = metrics->CurrentSocketPower << 8;
@@ -1296,9 +1297,9 @@ static int renoir_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = renoir_get_smu_metrics_data(smu,
- METRICS_AVERAGE_SOCKETPOWER,
+ METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
@@ -1314,6 +1315,7 @@ static int renoir_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index c788aa7a99a9..5e408a195860 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -205,7 +205,8 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
-int smu_v12_0_mode2_reset(struct smu_context *smu){
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index d30ec3005ea1..cc3169400c9b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -94,8 +94,7 @@
*/
#define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION 0x00443300
-static const struct smu_temperature_range smu13_thermal_policy[] =
-{
+static const struct smu_temperature_range smu13_thermal_policy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
@@ -196,7 +195,7 @@ static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUN
ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL_BIT),
ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, FEATURE_OUT_OF_BAND_MONITOR_BIT),
- ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN),
+ ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DWN),
ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
};
@@ -580,7 +579,7 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
@@ -626,9 +625,10 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
break;
case METRICS_AVERAGE_SOCKETPOWER:
/* Valid power data is available only from primary die */
- *value = aldebaran_is_primary(smu) ?
- metrics->AverageSocketPower << 8 :
- 0;
+ if (aldebaran_is_primary(smu))
+ *value = metrics->AverageSocketPower << 8;
+ else
+ ret = -EOPNOTSUPP;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->TemperatureEdge *
@@ -1095,16 +1095,6 @@ static int aldebaran_get_current_activity_percent(struct smu_context *smu,
return ret;
}
-static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value)
-{
- if (!value)
- return -EINVAL;
-
- return aldebaran_get_smu_metrics_data(smu,
- METRICS_AVERAGE_SOCKETPOWER,
- value);
-}
-
static int aldebaran_thermal_get_temperature(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
@@ -1158,8 +1148,10 @@ static int aldebaran_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
- ret = aldebaran_get_gpu_power(smu, (uint32_t *)data);
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ ret = aldebaran_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
@@ -1184,6 +1176,7 @@ static int aldebaran_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -1525,7 +1518,6 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -1542,6 +1534,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
}
r = num_msgs;
fail:
+ mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
@@ -1906,8 +1899,7 @@ static int aldebaran_mode1_reset(struct smu_context *smu)
smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (smu_version < 0x00440700) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
- }
- else {
+ } else {
/* fatal error triggered by ras, PMFW supports the flag
from 68.44.0 */
if ((smu_version >= 0x00442c00) && ras &&
@@ -2116,7 +2108,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.register_irq_handler = smu_v13_0_register_irq_handler,
.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= aldebaran_is_baco_supported,
+ .baco_is_support = aldebaran_is_baco_supported,
.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
@@ -2147,5 +2139,6 @@ void aldebaran_set_ppt_funcs(struct smu_context *smu)
smu->clock_map = aldebaran_clk_map;
smu->feature_map = aldebaran_feature_mask_map;
smu->table_map = aldebaran_table_map;
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 393c6a7b9609..f1282fc4b90a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -83,7 +83,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static const int link_speed[] = {25, 50, 80, 160};
const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
@@ -269,45 +268,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
- if (smu->is_apu)
+ if (smu->is_apu ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6))
adev->pm.fw_version = smu_version;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
- break;
- case IP_VERSION(13, 0, 0):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0;
- break;
- case IP_VERSION(13, 0, 10):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
- break;
- case IP_VERSION(13, 0, 7):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7;
- break;
- case IP_VERSION(13, 0, 1):
- case IP_VERSION(13, 0, 3):
- case IP_VERSION(13, 0, 8):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
- break;
- case IP_VERSION(13, 0, 4):
- case IP_VERSION(13, 0, 11):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_4;
- break;
- case IP_VERSION(13, 0, 5):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
- break;
- case IP_VERSION(13, 0, 6):
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_6;
- adev->pm.fw_version = smu_version;
- break;
- default:
- dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- adev->ip_versions[MP1_HWIP][0]);
- smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
- break;
- }
-
/* only for dGPU w/ SMU13*/
if (adev->pm.fw)
dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
@@ -502,17 +466,26 @@ int smu_v13_0_init_smc_tables(struct smu_context *smu)
ret = -ENOMEM;
goto err3_out;
}
+
+ smu_table->user_overdrive_table =
+ kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
+ if (!smu_table->user_overdrive_table) {
+ ret = -ENOMEM;
+ goto err4_out;
+ }
}
smu_table->combo_pptable =
kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
if (!smu_table->combo_pptable) {
ret = -ENOMEM;
- goto err4_out;
+ goto err5_out;
}
return 0;
+err5_out:
+ kfree(smu_table->user_overdrive_table);
err4_out:
kfree(smu_table->boot_overdrive_table);
err3_out:
@@ -532,12 +505,14 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->gpu_metrics_table);
kfree(smu_table->combo_pptable);
+ kfree(smu_table->user_overdrive_table);
kfree(smu_table->boot_overdrive_table);
kfree(smu_table->overdrive_table);
kfree(smu_table->max_sustainable_clocks);
kfree(smu_table->driver_pptable);
smu_table->gpu_metrics_table = NULL;
smu_table->combo_pptable = NULL;
+ smu_table->user_overdrive_table = NULL;
smu_table->boot_overdrive_table = NULL;
smu_table->overdrive_table = NULL;
smu_table->max_sustainable_clocks = NULL;
@@ -573,11 +548,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
- smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+ smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
GFP_KERNEL);
if (!smu_power->power_context)
return -ENOMEM;
- smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+ smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
return 0;
}
@@ -1145,7 +1120,7 @@ smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
- if(clk_select == SMU_UCLK)
+ if (clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
}
@@ -1377,13 +1352,8 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
- /*
- * SW CTF just occurred.
- * Try to do a graceful shutdown to prevent further damage.
- */
- dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
- orderly_poweroff(true);
+ schedule_delayed_work(&smu->swctf_delayed_work,
+ msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
@@ -1466,8 +1436,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
return 0;
}
-static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs =
-{
+static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
.set = smu_v13_0_set_irq_state,
.process = smu_v13_0_irq_process,
};
@@ -1962,7 +1931,7 @@ static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
/* SMU v13.0.2 FW returns 0 based max level, increment by one for it */
- if((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))
+ if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value))
++(*value);
return ret;
@@ -2293,7 +2262,7 @@ int smu_v13_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- smu_baco->maco_support ?
+ (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
@@ -2453,3 +2422,51 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
return ret;
}
+
+int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+ struct smu_13_0_pcie_table *pcie_table =
+ &dpm_context->dpm_tables.pcie_table;
+ int num_of_levels = pcie_table->num_of_link_levels;
+ uint32_t smu_pcie_arg;
+ int ret, i;
+
+ if (!amdgpu_device_pcie_dynamic_switching_supported()) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ if (pcie_table->pcie_lane[i] > pcie_width_cap)
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ }
+ }
+
+ for (i = 0; i < num_of_levels; i++) {
+ smu_pcie_arg = i << 16;
+ smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
+ smu_pcie_arg |= pcie_table->pcie_lane[i];
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 09405ef1e3c8..8b7403ba89d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -46,7 +46,6 @@
#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
#include "smu_cmn.h"
#include "amdgpu_ras.h"
-#include "umc_v8_10.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -97,6 +96,14 @@
*/
#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
+#define PP_OD_FEATURE_GFXCLK_FMIN 0
+#define PP_OD_FEATURE_GFXCLK_FMAX 1
+#define PP_OD_FEATURE_UCLK_FMIN 2
+#define PP_OD_FEATURE_UCLK_FMAX 3
+#define PP_OD_FEATURE_GFX_VF_CURVE 4
+
+#define LINK_SPEED_MAX 3
+
static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -237,6 +244,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(I2C_COMMANDS),
TAB_MAP(ECCINFO),
+ TAB_MAP(OVERDRIVE),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -331,6 +339,14 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
struct smu_13_0_0_powerplay_table *powerplay_table =
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+#if 0
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsMin;
+#endif
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -342,14 +358,30 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->maco_support = true;
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
+ /*
+ * We are in the transition to a new OD mechanism.
+ * Disable the OD feature support for SMU13 temporarily.
+ * TODO: get this reverted when new OD mechanism online
+ */
+#if 0
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+ !overdrive_upperlimits->FeatureCtrlMask)
+ smu->od_enabled = false;
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
+#else
+ smu->od_enabled = false;
+#endif
+
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
+
+ smu->adev->pm.no_fan =
+ !(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
return 0;
}
@@ -461,7 +493,7 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -923,7 +955,7 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_0_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
@@ -967,6 +999,7 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -1022,17 +1055,116 @@ static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
+static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu,
+ int od_feature_bit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+
+ return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+}
+
+static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min,
+ int32_t *max)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsMin;
+ int32_t od_min_setting, od_max_setting;
+
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_GFXCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmin;
+ od_max_setting = overdrive_upperlimits->GfxclkFmin;
+ break;
+ case PP_OD_FEATURE_GFXCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmax;
+ od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ break;
+ case PP_OD_FEATURE_UCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->UclkFmin;
+ od_max_setting = overdrive_upperlimits->UclkFmin;
+ break;
+ case PP_OD_FEATURE_UCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->UclkFmax;
+ od_max_setting = overdrive_upperlimits->UclkFmax;
+ break;
+ case PP_OD_FEATURE_GFX_VF_CURVE:
+ od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
+ od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
+ break;
+ default:
+ od_min_setting = od_max_setting = INT_MAX;
+ break;
+ }
+
+ if (min)
+ *min = od_min_setting;
+ if (max)
+ *max = od_max_setting;
+}
+
+static void smu_v13_0_0_dump_od_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+}
+
+static int smu_v13_0_0_get_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ false);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+
+ return ret;
+}
+
+static int smu_v13_0_0_upload_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ true);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+
+ return ret;
+}
+
static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
- const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
uint32_t gen_speed, lane_width;
int i, curr_freq, size = 0;
+ int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1145,10 +1277,88 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
pcie_table->clk_freq[i],
(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
- (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
+ (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFXCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_UCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
+ od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+ break;
+
+ case SMU_OD_VDDC_CURVE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dmv\n",
+ i,
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
+ !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
+ !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+
+ if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1156,6 +1366,217 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
return size;
}
+static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[],
+ uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t offset_of_voltageoffset;
+ int32_t minimum, maximum;
+ uint32_t feature_ctrlmask;
+ int i, ret = 0;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ case 1:
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.GfxclkFmin,
+ (uint32_t)od_table->OverDriveTable.GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ case 1:
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.UclkFmin,
+ (uint32_t)od_table->OverDriveTable.UclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ dev_warn(adev->dev, "VF curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
+ input[0] < 0)
+ return -EINVAL;
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[1] < minimum ||
+ input[1] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t));
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ fallthrough;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ /*
+ * The member below tells PMFW which settings are targeted by
+ * this single operation:
+ * `uint32_t FeatureCtrlMask;`
+ * It does not carry the user's actual custom settings, so we
+ * do not cache it.
+ */
+ offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
+ if (memcmp((u8 *)od_table + offset_of_voltageoffset,
+ table_context->user_overdrive_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
+ smu_v13_0_0_dump_od_table(smu, od_table);
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ od_table->OverDriveTable.FeatureCtrlMask = 0;
+ memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
+ (u8 *)od_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
+
+ if (!memcmp(table_context->user_overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t)))
+ smu->user_dpm_profile.user_od = false;
+ else
+ smu->user_dpm_profile.user_od = true;
+ }
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
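The od_edit_dpm_table handler above is what ultimately services writes to the pp_od_clk_voltage sysfs file. Below is a minimal user-space sketch of driving it, assuming the usual amdgpu conventions ("s <idx> <MHz>" maps to PP_OD_EDIT_SCLK_VDDC_TABLE and "c" to PP_OD_COMMIT_DPM_TABLE); the sysfs parsing and the card0 path are outside this diff, so treat them as assumptions.

    #include <stdio.h>

    /* Hypothetical helper: write one overdrive command string to the sysfs node. */
    static int od_write(const char *cmd)
    {
            FILE *f = fopen("/sys/class/drm/card0/device/pp_od_clk_voltage", "w");

            if (!f)
                    return -1;
            fprintf(f, "%s\n", cmd);
            return fclose(f);
    }

    int main(void)
    {
            od_write("s 1 2600");   /* edit GfxclkFmax; validated against the PP_OD_FEATURE_GFXCLK_FMAX limits */
            od_write("c");          /* commit: uploads the table and caches it as user_overdrive_table */
            return 0;
    }
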
static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t mask)
@@ -1235,37 +1656,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
- uint32_t pcie_gen_cap,
- uint32_t pcie_width_cap)
-{
- struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_pcie_table *pcie_table =
- &dpm_context->dpm_tables.pcie_table;
- uint32_t smu_pcie_arg;
- int ret, i;
-
- for (i = 0; i < pcie_table->num_of_link_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
-
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct smu_temperature_range smu13_thermal_policy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
@@ -1300,6 +1690,7 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
+ range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
return 0;
}
@@ -1354,7 +1745,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
- gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
@@ -1371,7 +1762,10 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
gpu_metrics->pcie_link_width = metrics->PcieWidth;
- gpu_metrics->pcie_link_speed = metrics->PcieRate;
+ if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
+ gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
+ else
+ gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
@@ -1384,6 +1778,78 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
+ OverDriveTableExternal_t *boot_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
+ OverDriveTableExternal_t *user_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTableExternal_t user_od_table_bak;
+ int ret = 0;
+ int i;
+
+ ret = smu_v13_0_0_get_overdrive_table(smu, boot_od_table);
+ if (ret)
+ return ret;
+
+ smu_v13_0_0_dump_od_table(smu, boot_od_table);
+
+ memcpy(od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
+ * but we have to preserve the user-defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak,
+ user_od_table,
+ sizeof(OverDriveTableExternal_t));
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ user_od_table->OverDriveTable.GfxclkFmin =
+ user_od_table_bak.OverDriveTable.GfxclkFmin;
+ user_od_table->OverDriveTable.GfxclkFmax =
+ user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.UclkFmin =
+ user_od_table_bak.OverDriveTable.UclkFmin;
+ user_od_table->OverDriveTable.UclkFmax =
+ user_od_table_bak.OverDriveTable.UclkFmax;
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
+ user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table = table_context->overdrive_table;
+ OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
+ 1U << PP_OD_FEATURE_UCLK_BIT |
+ 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
+ user_od_table->OverDriveTable.FeatureCtrlMask = 0;
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
+
+ return res;
+}
+
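The restore path above treats FeatureCtrlMask as a per-upload selector rather than persistent state: the three OD feature bits are set only for the upload and cleared again immediately afterwards. A small sketch of composing that mask, with illustrative bit values (the real PP_OD_FEATURE_*_BIT numbers come from the SMU13 headers, not this diff):

    #include <stdint.h>

    /* Illustrative bit positions, not the actual PP_OD_FEATURE_*_BIT values. */
    enum { OD_GFXCLK_BIT = 0, OD_UCLK_BIT = 1, OD_GFX_VF_CURVE_BIT = 2 };

    static uint32_t od_restore_mask(void)
    {
            return (1U << OD_GFXCLK_BIT) |
                   (1U << OD_UCLK_BIT) |
                   (1U << OD_GFX_VF_CURVE_BIT);   /* mark every OD domain as touched for this upload */
    }
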
static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context =
@@ -1696,10 +2162,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
}
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
+ (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
+ ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_COMPUTE_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_CUSTOM);
+ } else {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
smu->power_profile_mode);
+ }
+
if (workload_type < 0)
return -EINVAL;
@@ -1716,7 +2211,8 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu)
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v13_0_baco_set_armd3_sequence(smu,
- smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO);
else
return smu_v13_0_baco_enter(smu);
}
@@ -1808,7 +2304,6 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -1825,6 +2320,7 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
}
r = num_msgs;
fail:
+ mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
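Both i2c_xfer hunks in this patch make the same fix: the pm mutex used to be dropped right after the transfer request, leaving the reply readback unserialized, and it is now released only at the single fail label. A condensed sketch of the resulting pattern, with illustrative names:

    #include <pthread.h>

    static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;

    static int transfer(int (*send)(void), int (*read_back)(void))
    {
            int r;

            pthread_mutex_lock(&pm_lock);
            r = send();
            if (r)
                    goto fail;              /* lock still held on the error path */
            r = read_back();                /* readback now serialized with the send */
    fail:
            pthread_mutex_unlock(&pm_lock); /* single unlock site covers every exit */
            return r;
    }
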
@@ -2097,7 +2593,7 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
- for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) {
+ for (i = 0; i < ARRAY_SIZE(ecc_table->EccInfo); i++) {
ecc_info_per_channel = &(eccinfo->ecc[i]);
ecc_info_per_channel->ce_count_lo_chip =
ecc_table->EccInfo[i].ce_count_lo_chip;
@@ -2142,7 +2638,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -2150,6 +2646,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
.get_gpu_metrics = smu_v13_0_0_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
+ .set_default_od_settings = smu_v13_0_0_set_default_od_settings,
+ .restore_user_od_settings = smu_v13_0_0_restore_user_od_settings,
+ .od_edit_dpm_table = smu_v13_0_0_od_edit_dpm_table,
.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk,
.set_performance_level = smu_v13_0_set_performance_level,
@@ -2199,5 +2698,6 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_0_table_map;
smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
smu->workload_map = smu_v13_0_0_workload_map;
+ smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION;
smu_v13_0_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 8fa9a36c38b6..626591f54bc4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -54,6 +54,10 @@
#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMU_13_0_4_UMD_PSTATE_GFXCLK 938
+#define SMU_13_0_4_UMD_PSTATE_SOCCLK 938
+#define SMU_13_0_4_UMD_PSTATE_FCLK 1875
+
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
@@ -253,7 +257,7 @@ static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics.GfxActivity;
gpu_metrics->average_mm_activity = metrics.UvdActivity;
- gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+ gpu_metrics->average_socket_power = metrics.AverageSocketPower;
gpu_metrics->average_gfx_power = metrics.Power[0];
gpu_metrics->average_soc_power = metrics.Power[1];
memcpy(&gpu_metrics->average_core_power[0],
@@ -317,6 +321,9 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->AverageSocketPower << 8) / 1000;
+ break;
+ case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
@@ -478,7 +485,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@@ -512,7 +519,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
for (i = 0; i < count; i++) {
- ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
break;
@@ -564,12 +572,18 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_4_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ ret = smu_v13_0_4_get_smu_metrics_data(smu,
+ METRICS_CURR_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
ret = smu_v13_0_4_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
@@ -830,6 +844,8 @@ static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t max)
{
enum smu_message_type msg_set_min, msg_set_max;
+ uint32_t min_clk = min;
+ uint32_t max_clk = max;
int ret = 0;
if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type))
@@ -858,12 +874,17 @@ static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+ if (clk_type == SMU_VCLK) {
+ min_clk = min << SMU_13_VCLK_SHIFT;
+ max_clk = max << SMU_13_VCLK_SHIFT;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL);
if (ret)
return ret;
return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
- max, NULL);
+ max_clk, NULL);
}
static int smu_v13_0_4_force_clk_levels(struct smu_context *smu,
@@ -900,6 +921,50 @@ static int smu_v13_0_4_force_clk_levels(struct smu_context *smu,
return ret;
}
+static int smu_v13_0_4_get_dpm_profile_freq(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum smu_clk_type clk_type,
+ uint32_t *min_clk,
+ uint32_t *max_clk)
+{
+ int ret = 0;
+ uint32_t clk_limit = 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clk_limit = SMU_13_0_4_UMD_PSTATE_GFXCLK;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
+ else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
+ break;
+ case SMU_SOCCLK:
+ clk_limit = SMU_13_0_4_UMD_PSTATE_SOCCLK;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
+ break;
+ case SMU_FCLK:
+ clk_limit = SMU_13_0_4_UMD_PSTATE_FCLK;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
+ else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL);
+ break;
+ case SMU_VCLK:
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit);
+ break;
+ case SMU_DCLK:
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *min_clk = *max_clk = clk_limit;
+ return ret;
+}
+
static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -907,6 +972,8 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
uint32_t sclk_min = 0, sclk_max = 0;
uint32_t fclk_min = 0, fclk_max = 0;
uint32_t socclk_min = 0, socclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
int ret = 0;
switch (level) {
@@ -914,28 +981,42 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max);
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
+ vclk_min = vclk_max;
+ dclk_min = dclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL);
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
+ vclk_max = vclk_min;
+ dclk_max = dclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max);
+ smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- /* Temporarily do nothing since the optimal clocks haven't been provided yet */
+ smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max);
+ smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max);
+ smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max);
+ smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -975,6 +1056,23 @@ static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
return ret;
}
+ if (vclk_min && vclk_max) {
+ ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
+ SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (dclk_min && dclk_max) {
+ ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
+ SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
return ret;
}
@@ -1043,6 +1141,7 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
smu->message_map = smu_v13_0_4_message_map;
smu->feature_map = smu_v13_0_4_feature_mask_map;
smu->table_map = smu_v13_0_4_table_map;
+ smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;
smu->is_apu = true;
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 66445964efbd..c6e7c2115a26 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -75,7 +75,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_5_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
- MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu , 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1),
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
@@ -288,7 +288,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->UvdActivity;
break;
- case METRICS_AVERAGE_SOCKETPOWER:
+ case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
@@ -332,9 +332,9 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = smu_v13_0_5_get_smu_metrics_data(smu,
- METRICS_AVERAGE_SOCKETPOWER,
+ METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
@@ -388,6 +388,7 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -831,6 +832,8 @@ static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t max)
{
enum smu_message_type msg_set_min, msg_set_max;
+ uint32_t min_clk = min;
+ uint32_t max_clk = max;
int ret = 0;
if (!smu_v13_0_5_clk_dpm_is_enabled(smu, clk_type))
@@ -851,11 +854,16 @@ static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+ if (clk_type == SMU_VCLK) {
+ min_clk = min << SMU_13_VCLK_SHIFT;
+ max_clk = max << SMU_13_VCLK_SHIFT;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL);
if (ret)
goto out;
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max_clk, NULL);
if (ret)
goto out;
@@ -866,7 +874,7 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
@@ -898,7 +906,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
goto print_clk_out;
for (i = 0; i < count; i++) {
- ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
goto print_clk_out;
@@ -970,31 +979,79 @@ force_level_out:
return ret;
}
+static int smu_v13_0_5_get_dpm_profile_freq(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum smu_clk_type clk_type,
+ uint32_t *min_clk,
+ uint32_t *max_clk)
+{
+ int ret = 0;
+ uint32_t clk_limit = 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clk_limit = SMU_13_0_5_UMD_PSTATE_GFXCLK;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
+ else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
+ break;
+ case SMU_VCLK:
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit);
+ break;
+ case SMU_DCLK:
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *min_clk = *max_clk = clk_limit;
+ return ret;
+}
+
static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
struct amdgpu_device *adev = smu->adev;
uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
int ret = 0;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max);
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max);
sclk_min = sclk_max;
+ vclk_min = vclk_max;
+ dclk_min = dclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL);
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL);
sclk_max = sclk_min;
+ vclk_max = vclk_min;
+ dclk_max = dclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max);
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- /* Temporarily do nothing since the optimal clocks haven't been provided yet */
+ smu_v13_0_5_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max);
+ smu_v13_0_5_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max);
+ smu_v13_0_5_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max);
break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ dev_err(adev->dev, "The performance level profile_min_mclk is not supported.");
+ return -EOPNOTSUPP;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
return 0;
@@ -1015,6 +1072,23 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
smu->gfx_actual_soft_max_freq = sclk_max;
}
+ if (vclk_min && vclk_max) {
+ ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
+ SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (dclk_min && dclk_max) {
+ ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
+ SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
return ret;
}
@@ -1068,6 +1142,7 @@ void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = smu_v13_0_5_feature_mask_map;
smu->table_map = smu_v13_0_5_table_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = SMU13_0_5_DRIVER_IF_VERSION;
smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_34);
smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_2);
smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_33);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h
index 40bc0f8e6d61..263cd651855e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h
@@ -24,6 +24,6 @@
#define __SMU_V13_0_5_PPT_H__
extern void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu);
-#define SMU_13_0_5_UMD_PSTATE_GFXCLK 1100
+#define SMU_13_0_5_UMD_PSTATE_GFXCLK 700
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index ea8f3d6fb98b..6ed9cd0a1e4e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -80,14 +80,23 @@
/* possible frequency drift (1Mhz) */
#define EPSILON 1
-#define smnPCIE_ESM_CTRL 0x111003D0
+#define smnPCIE_ESM_CTRL 0x93D0
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define MAX_LINK_WIDTH 6
+
+#define smnPCIE_LC_SPEED_CNTL 0x1a340290
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
+#define LINK_SPEED_MAX 4
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1),
- MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1),
+ MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
+ MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
@@ -98,8 +107,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
- MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0),
- MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0),
+ MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
+ MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
@@ -118,10 +127,11 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
- MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0),
- MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0),
+ MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
+ MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@@ -171,18 +181,12 @@ static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(I2C_COMMANDS),
};
-#define THROTTLER_PROCHOT_GFX_BIT 0
-#define THROTTLER_PPT_BIT 1
-#define THROTTLER_TEMP_SOC_BIT 2
-#define THROTTLER_TEMP_VR_GFX_BIT 3
-#define THROTTLER_TEMP_HBM_BIT 4
-
static const uint8_t smu_v13_0_6_throttler_map[] = {
[THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
- [THROTTLER_TEMP_SOC_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
- [THROTTLER_TEMP_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
- [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
- [THROTTLER_PROCHOT_GFX_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
+ [THROTTLER_THERMAL_SOCKET_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
+ [THROTTLER_THERMAL_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
+ [THROTTLER_THERMAL_VR_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
+ [THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
struct PPTable_t {
@@ -197,6 +201,7 @@ struct PPTable_t {
uint32_t LclkFrequencyTable[4];
uint32_t MaxLclkDpmRange;
uint32_t MinLclkDpmRange;
+ uint64_t PublicSerialNumber_AID;
bool Init;
};
@@ -220,10 +225,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -324,14 +331,24 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- int ret;
- int i;
+ int ret, i, retry = 100;
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
- ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
- if (ret)
- return ret;
+ while (retry--) {
+ ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+ if (ret)
+ return ret;
+
+ /* Ensure that metrics have been updated */
+ if (metrics->AccumulationCounter)
+ break;
+
+ usleep_range(1000, 1100);
+ }
+
+ if (!retry)
+ return -ETIME;
pptable->MaxSocketPowerLimit =
SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
@@ -355,6 +372,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
}
+ /* use AID0 serial number by default */
+ pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+
pptable->Init = true;
}
@@ -385,7 +405,7 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
break;
case SMU_SOCCLK:
if (pptable->Init)
- clock_limit = pptable->UclkFrequencyTable[0];
+ clock_limit = pptable->SocclkFrequencyTable[0];
break;
case SMU_FCLK:
if (pptable->Init)
@@ -638,16 +658,14 @@ static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
return (abs(frequency1 - frequency2) <= EPSILON);
}
-static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu,
- MetricsTable_t *metrics)
+static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_13_0_power_context *power_context = smu_power->power_context;
uint32_t throttler_status = 0;
- throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0;
- throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0;
- throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_SOC_BIT : 0;
- throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0;
- throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0;
+ throttler_status = atomic_read(&power_context->throttle_status);
+ dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status);
return throttler_status;
}
@@ -658,7 +676,10 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
{
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
int ret = 0;
+ int xcc_id;
ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
if (ret)
@@ -668,7 +689,13 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- *value = 0;
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version >= 0x552F00) {
+ xcc_id = GET_INST(GC, 0);
+ *value = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc_id]);
+ } else {
+ *value = 0;
+ }
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
@@ -693,23 +720,23 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_MEMACTIVITY:
*value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
break;
- case METRICS_AVERAGE_SOCKETPOWER:
+ case METRICS_CURR_SOCKETPOWER:
*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
- break;
- case METRICS_THROTTLER_STATUS:
- *value = smu_v13_0_6_get_throttler_status(smu, metrics);
+ *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
*value = UINT_MAX;
@@ -764,8 +791,6 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
- uint32_t display_levels;
- uint32_t freq_values[3] = { 0 };
uint32_t min_clk, max_clk;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -790,50 +815,24 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
return ret;
}
- single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
- ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
- if (ret) {
- dev_err(smu->adev->dev,
- "Attempt to get gfx clk levels Failed!");
- return ret;
- }
-
- display_levels = clocks.num_levels;
-
min_clk = pstate_table->gfxclk_pstate.curr.min;
max_clk = pstate_table->gfxclk_pstate.curr.max;
- freq_values[0] = min_clk;
- freq_values[1] = max_clk;
-
- /* fine-grained dpm has only 2 levels */
- if (now > min_clk && now < max_clk) {
- display_levels = clocks.num_levels + 1;
- freq_values[2] = max_clk;
- freq_values[1] = now;
- }
-
- /*
- * For DPM disabled case, there will be only one clock level.
- * And it's safe to assume that is always the current clock.
- */
- if (display_levels == clocks.num_levels) {
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(
- buf, size, "%d: %uMhz %s\n", i,
- freq_values[i],
- (clocks.num_levels == 1) ?
- "*" :
- (smu_v13_0_6_freqs_in_same_level(
- freq_values[i], now) ?
- "*" :
- ""));
+ if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
+ !smu_v13_0_6_freqs_in_same_level(now, max_clk)) {
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n",
+ min_clk);
+ size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
+ now);
+ size += sysfs_emit_at(buf, size, "2: %uMhz\n",
+ max_clk);
} else {
- for (i = 0; i < display_levels; i++)
- size += sysfs_emit_at(buf, size,
- "%d: %uMhz %s\n", i,
- freq_values[i],
- i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+ min_clk,
+ smu_v13_0_6_freqs_in_same_level(now, min_clk) ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ max_clk,
+ smu_v13_0_6_freqs_in_same_level(now, max_clk) ? "*" : "");
}
break;
@@ -1146,15 +1145,6 @@ static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value)
-{
- if (!value)
- return -EINVAL;
-
- return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER,
- value);
-}
-
static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
@@ -1200,8 +1190,10 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
- ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data);
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
+ ret = smu_v13_0_6_get_smu_metrics_data(smu,
+ METRICS_CURR_SOCKETPOWER,
+ (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
@@ -1227,6 +1219,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -1240,27 +1233,12 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
uint32_t *default_power_limit,
uint32_t *max_power_limit)
{
- struct smu_table_context *smu_table = &smu->smu_table;
- struct PPTable_t *pptable =
- (struct PPTable_t *)smu_table->driver_pptable;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
uint32_t power_limit = 0;
int ret;
- if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
- if (current_power_limit)
- *current_power_limit = 0;
- if (default_power_limit)
- *default_power_limit = 0;
- if (max_power_limit)
- *max_power_limit = 0;
-
- dev_warn(
- smu->adev->dev,
- "PPT feature is not enabled, power values can't be fetched.");
-
- return 0;
- }
-
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
if (ret) {
@@ -1287,16 +1265,149 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
return smu_v13_0_set_power_limit(smu, limit_type, limit);
}
+static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_13_0_power_context *power_context = smu_power->power_context;
+ uint32_t client_id = entry->client_id;
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t src_id = entry->src_id;
+ uint32_t data;
+
+ if (client_id == SOC15_IH_CLIENTID_MP1) {
+ if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
+ /*
+ * ctxid is used to distinguish different events for SMCToHost
+ * interrupt.
+ */
+ switch (ctxid) {
+ case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
+ /*
+ * Increment the throttle interrupt counter
+ */
+ atomic64_inc(&smu->throttle_int_counter);
+
+ if (!atomic_read(&adev->throttling_logging_enabled))
+ return 0;
+
+ /* This uses the new method which fixes the
+ * incorrect throttling status reporting
+ * through metrics table. For older FWs,
+ * it will be ignored.
+ */
+ if (__ratelimit(&adev->throttling_logging_rs)) {
+ atomic_set(
+ &power_context->throttle_status,
+ entry->src_data[1]);
+ schedule_work(&smu->throttling_logging_work);
+ }
+
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
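With this change the SMU 13.0.6 throttler status no longer comes from the metrics table: the MP1 SW interrupt delivers it in src_data[1], the handler caches it (rate-limited) before scheduling the logging work, and smu_v13_0_6_get_throttler_status simply reads the cached word back. A stand-alone sketch of that handoff, using C11 atomics in place of the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint throttle_status;

    /* Interrupt side: store the raw status word from the IV entry. */
    void on_throttle_irq(uint32_t src_data1)
    {
            atomic_store(&throttle_status, src_data1);
    }

    /* Logging side: read back whatever the last interrupt reported. */
    uint32_t get_throttler_status(void)
    {
            return atomic_load(&throttle_status);
    }
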
+static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ /* For MP1 SW irqs */
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
+
+ val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
+ val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
+ WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = {
+ .set = smu_v13_0_6_set_irq_state,
+ .process = smu_v13_0_6_irq_process,
+};
+
+static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_irq_src *irq_src = &smu->irq_source;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ irq_src->num_types = 1;
+ irq_src->funcs = &smu_v13_0_6_irq_funcs;
+
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+ IH_INTERRUPT_ID_TO_DRIVER,
+ irq_src);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v13_0_6_notify_unload(struct smu_context *smu)
+{
+ uint32_t smu_version;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version <= 0x553500)
+ return 0;
+
+ dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
+ /* Ignore the return value, just let FW know the driver is going away */
+ smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+
+ return 0;
+}
+
static int smu_v13_0_6_system_features_control(struct smu_context *smu,
bool enable)
{
- int ret;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
- /* Nothing to be done for APU */
- if (smu->adev->flags & AMD_IS_APU)
+ if (amdgpu_sriov_vf(adev))
return 0;
- ret = smu_v13_0_system_features_control(smu, enable);
+ if (enable) {
+ if (!(adev->flags & AMD_IS_APU))
+ ret = smu_v13_0_system_features_control(smu, enable);
+ } else {
+ /* Notify FW that the device is no longer driver managed */
+ smu_v13_0_6_notify_unload(smu);
+ }
return ret;
}
@@ -1639,7 +1750,6 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
}
mutex_lock(&adev->pm.mutex);
r = smu_v13_0_6_request_i2c_xfer(smu, req);
- mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -1656,6 +1766,7 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
}
r = num_msgs;
fail:
+ mutex_unlock(&adev->pm.mutex);
kfree(req);
return r;
}
@@ -1737,19 +1848,11 @@ static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- //SmuMetrics_t *metrics = smu->smu_table.metrics_table;
- uint32_t upper32 = 0, lower32 = 0;
- int ret;
-
- ret = smu_cmn_get_metrics_table(smu, NULL, false);
- if (ret)
- goto out;
-
- //upper32 = metrics->PublicSerialNumUpper32;
- //lower32 = metrics->PublicSerialNumLower32;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
-out:
- adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+ adev->unique_id = pptable->PublicSerialNumber_AID;
if (adev->serial[0] == '\0')
sprintf(adev->serial, "%016llx", adev->unique_id);
}
@@ -1774,37 +1877,35 @@ static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
en ? 0 : 1, NULL);
}
-static const struct throttling_logging_label {
- uint32_t feature_mask;
- const char *label;
-} logging_label[] = {
- { (1U << THROTTLER_TEMP_HBM_BIT), "HBM" },
- { (1U << THROTTLER_TEMP_SOC_BIT), "SOC" },
- { (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" },
+static const char *const throttling_logging_label[] = {
+ [THROTTLER_PROCHOT_BIT] = "Prochot",
+ [THROTTLER_PPT_BIT] = "PPT",
+ [THROTTLER_THERMAL_SOCKET_BIT] = "SOC",
+ [THROTTLER_THERMAL_VR_BIT] = "VR",
+ [THROTTLER_THERMAL_HBM_BIT] = "HBM"
};
+
static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
{
- int ret;
int throttler_idx, throtting_events = 0, buf_idx = 0;
struct amdgpu_device *adev = smu->adev;
uint32_t throttler_status;
char log_buf[256];
- ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS,
- &throttler_status);
- if (ret)
+ throttler_status = smu_v13_0_6_get_throttler_status(smu);
+ if (!throttler_status)
return;
memset(log_buf, 0, sizeof(log_buf));
- for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label);
+ for (throttler_idx = 0;
+ throttler_idx < ARRAY_SIZE(throttling_logging_label);
throttler_idx++) {
- if (throttler_status &
- logging_label[throttler_idx].feature_mask) {
+ if (throttler_status & (1U << throttler_idx)) {
throtting_events++;
- buf_idx += snprintf(log_buf + buf_idx,
- sizeof(log_buf) - buf_idx, "%s%s",
- throtting_events > 1 ? " and " : "",
- logging_label[throttler_idx].label);
+ buf_idx += snprintf(
+ log_buf + buf_idx, sizeof(log_buf) - buf_idx,
+ "%s%s", throtting_events > 1 ? " and " : "",
+ throttling_logging_label[throttler_idx]);
if (buf_idx >= sizeof(log_buf)) {
dev_err(adev->dev, "buffer overflow!\n");
log_buf[sizeof(log_buf) - 1] = '\0';
@@ -1813,19 +1914,28 @@ static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
}
}
- dev_warn(
- adev->dev,
- "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
- log_buf);
+ dev_warn(adev->dev,
+ "WARN: GPU is throttled, expect performance decrease. %s.\n",
+ log_buf);
kgd2kfd_smi_event_throttle(
smu->adev->kfd.dev,
smu_cmn_get_indep_throttler_status(throttler_status,
smu_v13_0_6_throttler_map));
}
+static int
+smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ return REG_GET_FIELD(RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL),
+ PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+}
+
static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ uint32_t speed_level;
uint32_t esm_ctrl;
/* TODO: confirm this on real target */
@@ -1833,7 +1943,13 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
if ((esm_ctrl >> 15) & 0x1FFFF)
return (((esm_ctrl >> 8) & 0x3F) + 128);
- return smu_v13_0_get_current_pcie_link_speed(smu);
+ speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ if (speed_level > LINK_SPEED_MAX)
+ speed_level = 0;
+
+ return pcie_gen_to_speed(speed_level + 1);
}
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
@@ -1841,8 +1957,13 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_3 *gpu_metrics =
(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, inst0, xcc0;
MetricsTable_t *metrics;
- int i, ret = 0;
+ u16 link_width_level;
+
+ inst0 = adev->sdma.instance[0].aid_id;
+ xcc0 = GET_INST(GC, 0);
metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
@@ -1851,51 +1972,63 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
- /* TODO: Decide on how to fill in zero value fields */
- gpu_metrics->temperature_edge = 0;
- gpu_metrics->temperature_hotspot = 0;
- gpu_metrics->temperature_mem = 0;
- gpu_metrics->temperature_vrgfx = 0;
- gpu_metrics->temperature_vrsoc = 0;
- gpu_metrics->temperature_vrmem = 0;
-
- gpu_metrics->average_gfx_activity = 0;
- gpu_metrics->average_umc_activity = 0;
- gpu_metrics->average_mm_activity = 0;
-
- gpu_metrics->average_socket_power = 0;
- gpu_metrics->energy_accumulator = 0;
-
- gpu_metrics->average_gfxclk_frequency = 0;
- gpu_metrics->average_socclk_frequency = 0;
- gpu_metrics->average_uclk_frequency = 0;
- gpu_metrics->average_vclk0_frequency = 0;
- gpu_metrics->average_dclk0_frequency = 0;
-
- gpu_metrics->current_gfxclk = 0;
- gpu_metrics->current_socclk = 0;
- gpu_metrics->current_uclk = 0;
- gpu_metrics->current_vclk0 = 0;
- gpu_metrics->current_dclk0 = 0;
-
+ gpu_metrics->temperature_hotspot =
+ SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+ /* Individual HBM stack temperature is not reported */
+ gpu_metrics->temperature_mem =
+ SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+ /* Reports max temperature of all voltage rails */
+ gpu_metrics->temperature_vrsoc =
+ SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+
+ gpu_metrics->average_gfx_activity =
+ SMUQ10_TO_UINT(metrics->SocketGfxBusy);
+ gpu_metrics->average_umc_activity =
+ SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
+
+ gpu_metrics->average_socket_power =
+ SMUQ10_TO_UINT(metrics->SocketPower);
+ /* Energy counter reported in 15.259uJ (2^-16) units */
+ gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+ gpu_metrics->current_gfxclk =
+ SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
+ gpu_metrics->current_socclk =
+ SMUQ10_TO_UINT(metrics->SocclkFrequency[inst0]);
+ gpu_metrics->current_uclk = SMUQ10_TO_UINT(metrics->UclkFrequency);
+ gpu_metrics->current_vclk0 =
+ SMUQ10_TO_UINT(metrics->VclkFrequency[inst0]);
+ gpu_metrics->current_dclk0 =
+ SMUQ10_TO_UINT(metrics->DclkFrequency[inst0]);
+
+ gpu_metrics->average_gfxclk_frequency = gpu_metrics->current_gfxclk;
+ gpu_metrics->average_socclk_frequency = gpu_metrics->current_socclk;
+ gpu_metrics->average_uclk_frequency = gpu_metrics->current_uclk;
+ gpu_metrics->average_vclk0_frequency = gpu_metrics->current_vclk0;
+ gpu_metrics->average_dclk0_frequency = gpu_metrics->current_dclk0;
+
+ /* Throttle status is not reported through metrics for now */
gpu_metrics->throttle_status = 0;
- gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status(
- gpu_metrics->throttle_status, smu_v13_0_6_throttler_map);
- gpu_metrics->current_fan_speed = 0;
+ if (!(adev->flags & AMD_IS_APU)) {
+ link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
+ if (link_width_level > MAX_LINK_WIDTH)
+ link_width_level = 0;
- gpu_metrics->pcie_link_width = 0;
- gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu);
+ gpu_metrics->pcie_link_width =
+ DECODE_LANE_WIDTH(link_width_level);
+ gpu_metrics->pcie_link_speed =
+ smu_v13_0_6_get_current_pcie_link_speed(smu);
+ }
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
- gpu_metrics->gfx_activity_acc = 0;
- gpu_metrics->mem_activity_acc = 0;
-
- for (i = 0; i < NUM_HBM_INSTANCES; i++)
- gpu_metrics->temperature_hbm[i] = 0;
+ gpu_metrics->gfx_activity_acc =
+ SMUQ10_TO_UINT(metrics->SocketGfxBusyAcc);
+ gpu_metrics->mem_activity_acc =
+ SMUQ10_TO_UINT(metrics->DramBandwidthUtilizationAcc);
- gpu_metrics->firmware_timestamp = 0;
+ gpu_metrics->firmware_timestamp = metrics->Timestamp;
*table = (void *)gpu_metrics;
kfree(metrics);
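A hedged aside on the units used above (not part of the patch): the metrics fields are Q10 fixed-point values and the energy accumulator counts 2^-16 J steps, so under the assumption that SMUQ10_TO_UINT() simply drops the 10-bit fractional part, the conversions work out as:

/* assumption: SMUQ10_TO_UINT(x) behaves like (x) >> 10 */
static inline u32 smuq10_to_uint_example(u64 q10)
{
	return q10 >> 10;
}

/*
 * e.g. a MaxSocketTemperature of (95 << 10) reads back as 95 degC, and a
 * SocketEnergyAcc of 65536 corresponds to 65536 * 15.259 uJ ~= 1 J.
 */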
@@ -1905,27 +2038,27 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
- u32 smu_version;
int ret = 0, index;
struct amdgpu_device *adev = smu->adev;
int timeout = 10;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
-
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_GfxDeviceDriverReset);
mutex_lock(&smu->message_lock);
+
ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
SMU_RESET_MODE_2);
+
/* This is similar to FLR, wait till max FLR timeout */
msleep(100);
+
dev_dbg(smu->adev->dev, "restore config space...\n");
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
dev_dbg(smu->adev->dev, "wait for reset ack\n");
- while (ret == -ETIME && timeout) {
+ do {
ret = smu_cmn_wait_for_response(smu);
/* Wait a bit more time for getting ACK */
if (ret == -ETIME) {
@@ -1934,16 +2067,14 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
continue;
}
- if (ret != 1) {
+ if (ret) {
dev_err(adev->dev,
- "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ "failed to send mode2 message \tparam: 0x%08x error code %d\n",
SMU_RESET_MODE_2, ret);
goto out;
}
- }
+ } while (ret == -ETIME && timeout);
- if (ret == 1)
- ret = 0;
out:
mutex_unlock(&smu->message_lock);
@@ -2032,11 +2163,9 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_limit = smu_v13_0_6_set_power_limit,
.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
- /* TODO: Thermal limits unknown, skip these for now
- .register_irq_handler = smu_v13_0_register_irq_handler,
+ .register_irq_handler = smu_v13_0_6_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
- */
.setup_pptable = smu_v13_0_6_setup_pptable,
.baco_is_support = smu_v13_0_6_is_baco_supported,
.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
@@ -2065,5 +2194,6 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->clock_map = smu_v13_0_6_clk_map;
smu->feature_map = smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
+ smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 3d9ff46706fb..94ef5b4d116d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -72,6 +72,14 @@
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
+#define PP_OD_FEATURE_GFXCLK_FMIN 0
+#define PP_OD_FEATURE_GFXCLK_FMAX 1
+#define PP_OD_FEATURE_UCLK_FMIN 2
+#define PP_OD_FEATURE_UCLK_FMAX 3
+#define PP_OD_FEATURE_GFX_VF_CURVE 4
+
+#define LINK_SPEED_MAX 3
+
static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -125,6 +133,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
+ MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -205,6 +214,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
+ TAB_MAP(OVERDRIVE),
};
static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -321,6 +331,12 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_baco_context *smu_baco = &smu->smu_baco;
PPTable_t *smc_pptable = table_context->driver_pptable;
BoardTable_t *BoardTable = &smc_pptable->BoardTable;
+#if 0
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &smc_pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &smc_pptable->SkuTable.OverDriveLimitsMin;
+#endif
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -332,14 +348,22 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
smu_baco->maco_support = true;
- table_context->thermal_controller_type =
- powerplay_table->thermal_controller_type;
+#if 0
+ if (!overdrive_lowerlimits->FeatureCtrlMask ||
+ !overdrive_upperlimits->FeatureCtrlMask)
+ smu->od_enabled = false;
/*
* Instead of having its own buffer space and get overdrive_table copied,
* smu->od_settings just points to the actual overdrive_table
*/
smu->od_settings = &powerplay_table->overdrive_table;
+#else
+ smu->od_enabled = false;
+#endif
+
+ table_context->thermal_controller_type =
+ powerplay_table->thermal_controller_type;
return 0;
}
@@ -376,8 +400,7 @@ static int smu_v13_0_7_check_fw_status(struct smu_context *smu)
}
#ifndef atom_smc_dpm_info_table_13_0_7
-struct atom_smc_dpm_info_table_13_0_7
-{
+struct atom_smc_dpm_info_table_13_0_7 {
struct atom_common_table_header table_header;
BoardTable_t BoardTable;
};
@@ -478,13 +501,13 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM);
+ AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -718,7 +741,7 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_t *metrics =
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
int ret = 0;
@@ -913,7 +936,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
ret = smu_v13_0_7_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
@@ -939,7 +962,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_v13_0_7_get_smu_metrics_data(smu,
- METRICS_AVERAGE_UCLK,
+ METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
@@ -957,6 +980,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -1012,16 +1036,116 @@ static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
+static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu,
+ int od_feature_bit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+
+ return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
+}
+
+static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
+ int od_feature_bit,
+ int32_t *min,
+ int32_t *max)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsMin;
+ int32_t od_min_setting, od_max_setting;
+
+ switch (od_feature_bit) {
+ case PP_OD_FEATURE_GFXCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmin;
+ od_max_setting = overdrive_upperlimits->GfxclkFmin;
+ break;
+ case PP_OD_FEATURE_GFXCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->GfxclkFmax;
+ od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ break;
+ case PP_OD_FEATURE_UCLK_FMIN:
+ od_min_setting = overdrive_lowerlimits->UclkFmin;
+ od_max_setting = overdrive_upperlimits->UclkFmin;
+ break;
+ case PP_OD_FEATURE_UCLK_FMAX:
+ od_min_setting = overdrive_lowerlimits->UclkFmax;
+ od_max_setting = overdrive_upperlimits->UclkFmax;
+ break;
+ case PP_OD_FEATURE_GFX_VF_CURVE:
+ od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
+ od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
+ break;
+ default:
+ od_min_setting = od_max_setting = INT_MAX;
+ break;
+ }
+
+ if (min)
+ *min = od_min_setting;
+ if (max)
+ *max = od_max_setting;
+}
+
+static void smu_v13_0_7_dump_od_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+}
+
+static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ false);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
+
+ return ret;
+}
+
+static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu,
+ OverDriveTableExternal_t *od_table)
+{
+ int ret = 0;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_OVERDRIVE,
+ 0,
+ (void *)od_table,
+ true);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+
+ return ret;
+}
+
static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int i, curr_freq, size = 0;
+ int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1138,6 +1262,84 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
"*" : "");
break;
+ case SMU_OD_SCLK:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFXCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
+ od_table->OverDriveTable.GfxclkFmin,
+ od_table->OverDriveTable.GfxclkFmax);
+ break;
+
+ case SMU_OD_MCLK:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_UCLK_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
+ size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
+ od_table->OverDriveTable.UclkFmin,
+ od_table->OverDriveTable.UclkFmax);
+ break;
+
+ case SMU_OD_VDDC_CURVE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ size += sysfs_emit_at(buf, size, "%d: %dmv\n",
+ i,
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
+ break;
+
+ case SMU_OD_RANGE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
+ !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
+ !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+
+ if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &min_value,
+ NULL);
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ NULL,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
+ min_value, max_value);
+ }
+ break;
+
default:
break;
}
@@ -1145,6 +1347,217 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
return size;
}
+static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[],
+ uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t offset_of_voltageoffset;
+ int32_t minimum, maximum;
+ uint32_t feature_ctrlmask;
+ int i, ret = 0;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
+ dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ case 1:
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.GfxclkFmin,
+ (uint32_t)od_table->OverDriveTable.GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
+ dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ switch (input[i]) {
+ case 0:
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMIN,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmin = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ case 1:
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_UCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[i + 1] < minimum ||
+ input[i + 1] > maximum) {
+ dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
+ input[i + 1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.UclkFmax = input[i + 1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
+ break;
+
+ default:
+ dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
+ dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ }
+
+ if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
+ dev_err(adev->dev,
+ "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
+ (uint32_t)od_table->OverDriveTable.UclkFmin,
+ (uint32_t)od_table->OverDriveTable.UclkFmax);
+ return -EINVAL;
+ }
+ break;
+
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
+ dev_warn(adev->dev, "VF curve setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
+ input[0] < 0)
+ return -EINVAL;
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFX_VF_CURVE,
+ &minimum,
+ &maximum);
+ if (input[1] < minimum ||
+ input[1] > maximum) {
+ dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
+ input[1], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
+ memcpy(od_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t));
+ od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
+ fallthrough;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ /*
+ * The `uint32_t FeatureCtrlMask` member below tells the PMFW which
+ * settings are targeted by this single operation. It carries no
+ * actual information about the user's custom settings, so we do
+ * not cache it.
+ */
+ offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
+ if (memcmp((u8 *)od_table + offset_of_voltageoffset,
+ table_context->user_overdrive_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
+ smu_v13_0_7_dump_od_table(smu, od_table);
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ od_table->OverDriveTable.FeatureCtrlMask = 0;
+ memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
+ (u8 *)od_table + offset_of_voltageoffset,
+ sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
+
+ if (!memcmp(table_context->user_overdrive_table,
+ table_context->boot_overdrive_table,
+ sizeof(OverDriveTableExternal_t)))
+ smu->user_dpm_profile.user_od = false;
+ else
+ smu->user_dpm_profile.user_od = true;
+ }
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t mask)
@@ -1224,39 +1637,7 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
- uint32_t pcie_gen_cap,
- uint32_t pcie_width_cap)
-{
- struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_pcie_table *pcie_table =
- &dpm_context->dpm_tables.pcie_table;
- uint32_t smu_pcie_arg;
- int ret, i;
-
- for (i = 0; i < pcie_table->num_of_link_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
- pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
- pcie_table->pcie_lane[i] = pcie_width_cap;
-
- smu_pcie_arg = i << 16;
- smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
- smu_pcie_arg |= pcie_table->pcie_lane[i];
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg,
- NULL);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static const struct smu_temperature_range smu13_thermal_policy[] =
-{
+static const struct smu_temperature_range smu13_thermal_policy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
@@ -1357,7 +1738,10 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
gpu_metrics->pcie_link_width = metrics->PcieWidth;
- gpu_metrics->pcie_link_speed = metrics->PcieRate;
+ if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
+ gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
+ else
+ gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
@@ -1370,6 +1754,78 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
+{
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
+ OverDriveTableExternal_t *boot_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
+ OverDriveTableExternal_t *user_od_table =
+ (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
+ OverDriveTableExternal_t user_od_table_bak;
+ int ret = 0;
+ int i;
+
+ ret = smu_v13_0_7_get_overdrive_table(smu, boot_od_table);
+ if (ret)
+ return ret;
+
+ smu_v13_0_7_dump_od_table(smu, boot_od_table);
+
+ memcpy(od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+
+ /*
+ * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
+ * but we have to preserve the user-defined values in "user_od_table".
+ */
+ if (!smu->adev->in_suspend) {
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ smu->user_dpm_profile.user_od = false;
+ } else if (smu->user_dpm_profile.user_od) {
+ memcpy(&user_od_table_bak,
+ user_od_table,
+ sizeof(OverDriveTableExternal_t));
+ memcpy(user_od_table,
+ boot_od_table,
+ sizeof(OverDriveTableExternal_t));
+ user_od_table->OverDriveTable.GfxclkFmin =
+ user_od_table_bak.OverDriveTable.GfxclkFmin;
+ user_od_table->OverDriveTable.GfxclkFmax =
+ user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.UclkFmin =
+ user_od_table_bak.OverDriveTable.UclkFmin;
+ user_od_table->OverDriveTable.UclkFmax =
+ user_od_table_bak.OverDriveTable.UclkFmax;
+ for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
+ user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
+ user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table = table_context->overdrive_table;
+ OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
+ int res;
+
+ user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
+ 1U << PP_OD_FEATURE_UCLK_BIT |
+ 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
+ res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
+ user_od_table->OverDriveTable.FeatureCtrlMask = 0;
+ if (res == 0)
+ memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
+
+ return res;
+}
+
static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context =
@@ -1683,7 +2139,8 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v13_0_baco_set_armd3_sequence(smu,
- smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO);
else
return smu_v13_0_baco_enter(smu);
}
@@ -1751,7 +2208,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
- .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
+ .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -1759,6 +2216,9 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
.get_gpu_metrics = smu_v13_0_7_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
+ .set_default_od_settings = smu_v13_0_7_set_default_od_settings,
+ .restore_user_od_settings = smu_v13_0_7_restore_user_od_settings,
+ .od_edit_dpm_table = smu_v13_0_7_od_edit_dpm_table,
.set_performance_level = smu_v13_0_set_performance_level,
.gfx_off_control = smu_v13_0_gfx_off_control,
.get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm,
@@ -1770,6 +2230,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_7_get_power_limit,
.set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
@@ -1796,5 +2257,6 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_7_table_map;
smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
smu->workload_map = smu_v13_0_7_workload_map;
+ smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
}
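As a hedged usage sketch (not part of the patch), a caller in this file, for example the handler behind the pp_od_clk_voltage interface, could raise the gfxclk upper limit and then commit it through the new hook like this:

static int od_edit_example(struct smu_context *smu)
{
	long args[2] = { 1, 2600 };	/* index 1 == Fmax, value in MHz */
	int ret;

	/* stage the new GfxclkFmax in the cached overdrive table */
	ret = smu_v13_0_7_od_edit_dpm_table(smu, PP_OD_EDIT_SCLK_VDDC_TABLE,
					    args, 2);
	if (ret)
		return ret;

	/* push the staged settings to the PMFW and update the user table */
	return smu_v13_0_7_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE,
					     NULL, 0);
}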
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 04e56b0b3033..2e74d749efdd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -47,6 +47,14 @@
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1L
+#define SMU_13_0_8_UMD_PSTATE_GFXCLK 533
+#define SMU_13_0_8_UMD_PSTATE_SOCCLK 533
+#define SMU_13_0_8_UMD_PSTATE_FCLK 800
+
+#define SMU_13_0_1_UMD_PSTATE_GFXCLK 700
+#define SMU_13_0_1_UMD_PSTATE_SOCCLK 678
+#define SMU_13_0_1_UMD_PSTATE_FCLK 1800
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -357,7 +365,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->UvdActivity;
break;
- case METRICS_AVERAGE_SOCKETPOWER:
+ case METRICS_CURR_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
@@ -415,9 +423,9 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
- case AMDGPU_PP_SENSOR_GPU_POWER:
+ case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = yellow_carp_get_smu_metrics_data(smu,
- METRICS_AVERAGE_SOCKETPOWER,
+ METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
@@ -471,6 +479,7 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
(uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
break;
@@ -957,6 +966,9 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t max)
{
enum smu_message_type msg_set_min, msg_set_max;
+ uint32_t min_clk = min;
+ uint32_t max_clk = max;
+
int ret = 0;
if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type))
@@ -985,11 +997,17 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
return -EINVAL;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+ if (clk_type == SMU_VCLK) {
+ min_clk = min << SMU_13_VCLK_SHIFT;
+ max_clk = max << SMU_13_VCLK_SHIFT;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL);
+
if (ret)
goto out;
- ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max_clk, NULL);
if (ret)
goto out;
@@ -997,12 +1015,49 @@ out:
return ret;
}
+static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ uint32_t clk_limit = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK;
+ if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
+ (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK;
+ break;
+ case SMU_SOCCLK:
+ if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK;
+ if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
+ (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK;
+ break;
+ case SMU_FCLK:
+ if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
+ clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK;
+ if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
+ (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
+ clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK;
+ break;
+ default:
+ break;
+ }
+
+ return clk_limit;
+}
+
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
+ uint32_t clk_limit = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1033,7 +1088,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
goto print_clk_out;
for (i = 0; i < count; i++) {
- ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
goto print_clk_out;
@@ -1043,6 +1099,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
break;
case SMU_GFXCLK:
case SMU_SCLK:
+ clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type);
ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
goto print_clk_out;
@@ -1057,7 +1114,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
i == 0 ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
- i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK,
+ i == 1 ? cur_value : clk_limit,
i == 1 ? "*" : "");
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
@@ -1106,6 +1163,49 @@ force_level_out:
return ret;
}
+static int yellow_carp_get_dpm_profile_freq(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum smu_clk_type clk_type,
+ uint32_t *min_clk,
+ uint32_t *max_clk)
+{
+ int ret = 0;
+ uint32_t clk_limit = 0;
+
+ clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type);
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
+ else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
+ break;
+ case SMU_SOCCLK:
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
+ break;
+ case SMU_FCLK:
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
+ else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL);
+ break;
+ case SMU_VCLK:
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit);
+ break;
+ case SMU_DCLK:
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ *min_clk = *max_clk = clk_limit;
+ return ret;
+}
+
static int yellow_carp_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -1113,6 +1213,9 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
uint32_t sclk_min = 0, sclk_max = 0;
uint32_t fclk_min = 0, fclk_max = 0;
uint32_t socclk_min = 0, socclk_max = 0;
+ uint32_t vclk_min = 0, vclk_max = 0;
+ uint32_t dclk_min = 0, dclk_max = 0;
+
int ret = 0;
switch (level) {
@@ -1120,28 +1223,42 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max);
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max);
sclk_min = sclk_max;
fclk_min = fclk_max;
socclk_min = socclk_max;
+ vclk_min = vclk_max;
+ dclk_min = dclk_max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL);
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL);
sclk_max = sclk_min;
fclk_max = fclk_min;
socclk_max = socclk_min;
+ vclk_max = vclk_min;
+ dclk_max = dclk_min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max);
+ yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- /* Temporarily do nothing since the optimal clocks haven't been provided yet */
+ yellow_carp_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max);
+ yellow_carp_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max);
+ yellow_carp_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max);
+ yellow_carp_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max);
+ yellow_carp_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1181,6 +1298,24 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
return ret;
}
+ if (vclk_min && vclk_max) {
+ ret = yellow_carp_set_soft_freq_limited_range(smu,
+ SMU_VCLK,
+ vclk_min,
+ vclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (dclk_min && dclk_max) {
+ ret = yellow_carp_set_soft_freq_limited_range(smu,
+ SMU_DCLK,
+ dclk_min,
+ dclk_max);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
@@ -1234,5 +1369,6 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = yellow_carp_feature_mask_map;
smu->table_map = yellow_carp_table_map;
smu->is_apu = true;
+ smu->smc_driver_if_version = SMU13_YELLOW_CARP_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
index a9205a8ea3ad..b3ad8352c68a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
@@ -24,6 +24,5 @@
#define __YELLOW_CARP_PPT_H__
extern void yellow_carp_set_ppt_funcs(struct smu_context *smu);
-#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 3ecb900e6ecd..12618a583e97 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -39,6 +39,8 @@
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+const int link_speed[] = {25, 50, 80, 160, 320, 640};
+
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
@@ -691,7 +693,7 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
-static const char* __smu_feature_names[] = {
+static const char *__smu_feature_names[] = {
SMU_FEATURE_MASKS
};
@@ -927,7 +929,7 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
void *metrics_table,
bool bypass_cache)
{
- struct smu_table_context *smu_table= &smu->smu_table;
+ struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size =
smu_table->tables[SMU_TABLE_SMU_METRICS].size;
int ret = 0;
@@ -969,7 +971,7 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
struct metrics_table_header *header = (struct metrics_table_header *)table;
uint16_t structure_size;
-#define METRICS_VERSION(a, b) ((a << 16) | b )
+#define METRICS_VERSION(a, b) ((a << 16) | b)
switch (METRICS_VERSION(frev, crev)) {
case METRICS_VERSION(1, 0):
@@ -996,6 +998,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 3):
structure_size = sizeof(struct gpu_metrics_v2_3);
break;
+ case METRICS_VERSION(2, 4):
+ structure_size = sizeof(struct gpu_metrics_v2_4);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index d7cd358a53bd..cc590e27d88a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -30,6 +30,14 @@
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
+extern const int link_speed[];
+
+/* Helper to convert from PCIe Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
+static inline int pcie_gen_to_speed(uint32_t gen)
+{
+ return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]);
+}
+
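For illustration (not part of the patch), the table and helper above map PCIe generations to 0.1 GT/s units as follows:

static inline void pcie_gen_to_speed_example(void)
{
	/* Gen1..Gen6 -> 2.5, 5.0, 8.0, 16.0, 32.0 and 64.0 GT/s */
	WARN_ON(pcie_gen_to_speed(1) != 25);
	WARN_ON(pcie_gen_to_speed(4) != 160);
	/* gen 0 (link not yet trained) falls back to the Gen1 entry */
	WARN_ON(pcie_gen_to_speed(0) != 25);
}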
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg_index,
uint32_t param);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index ceb13c838067..bcc42abfc768 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -61,14 +61,14 @@
#define smu_feature_get_enabled_mask(smu, mask) smu_ppt_funcs(get_enabled_mask, -EOPNOTSUPP, smu, mask)
#define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
-#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
+#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0, smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
#define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu)
#define smu_disable_thermal_alert(smu) smu_ppt_funcs(disable_thermal_alert, 0, smu)
#define smu_smc_read_sensor(smu, sensor, data, size) smu_ppt_funcs(read_sensor, -EINVAL, smu, sensor, data, size)
#define smu_pre_display_config_changed(smu) smu_ppt_funcs(pre_display_config_changed, 0, smu)
-#define smu_display_config_changed(smu) smu_ppt_funcs(display_config_changed, 0 , smu)
+#define smu_display_config_changed(smu) smu_ppt_funcs(display_config_changed, 0, smu)
#define smu_apply_clocks_adjust_rules(smu) smu_ppt_funcs(apply_clocks_adjust_rules, 0, smu)
#define smu_notify_smc_display_config(smu) smu_ppt_funcs(notify_smc_display_config, 0, smu)
#define smu_run_btc(smu) smu_ppt_funcs(run_btc, 0, smu)
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index c1b89274d2a4..ddf20708370f 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
menu "ARM devices"
+ depends on DRM
config DRM_HDLCD
tristate "ARM HDLCD"
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index cea3fd5772b5..2c661f28410e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -12,6 +12,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_bridge.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
@@ -612,9 +614,11 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
struct komeda_crtc *kcrtc)
{
struct drm_crtc *crtc = &kcrtc->base;
+ struct drm_device *base = &kms->base;
+ struct drm_bridge *bridge;
int err;
- err = drm_crtc_init_with_planes(&kms->base, crtc,
+ err = drm_crtc_init_with_planes(base, crtc,
get_crtc_primary(kms, kcrtc), NULL,
&komeda_crtc_funcs, NULL);
if (err)
@@ -624,6 +628,22 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
crtc->port = kcrtc->master->of_output_port;
+ /* Construct an encoder for each pipeline and attach it to the remote
+ * bridge
+ */
+ kcrtc->encoder.possible_crtcs = drm_crtc_mask(crtc);
+ err = drm_simple_encoder_init(base, &kcrtc->encoder,
+ DRM_MODE_ENCODER_TMDS);
+ if (err)
+ return err;
+
+ bridge = devm_drm_of_get_bridge(base->dev, kcrtc->master->of_node,
+ KOMEDA_OF_PORT_OUTPUT, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ err = drm_bridge_attach(&kcrtc->encoder, bridge, NULL, 0);
+
drm_crtc_enable_color_mgmt(crtc, 0, true, KOMEDA_COLOR_LUT_SIZE);
return err;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index cc7664c95a54..14ee79becacb 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -6,7 +6,7 @@
*/
#include <linux/io.h>
#include <linux/iommu.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 28f76e07dd95..cb2a2be24c5f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/component.h>
#include <linux/pm_runtime.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_module.h>
@@ -28,13 +27,11 @@ struct komeda_dev *dev_to_mdev(struct device *dev)
return mdrv ? mdrv->mdev : NULL;
}
-static void komeda_unbind(struct device *dev)
+static void komeda_platform_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct komeda_drv *mdrv = dev_get_drvdata(dev);
- if (!mdrv)
- return;
-
komeda_kms_detach(mdrv->kms);
if (pm_runtime_enabled(dev))
@@ -48,8 +45,9 @@ static void komeda_unbind(struct device *dev)
devm_kfree(dev, mdrv);
}
-static int komeda_bind(struct device *dev)
+static int komeda_platform_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct komeda_drv *mdrv;
int err;
@@ -91,52 +89,6 @@ free_mdrv:
return err;
}
-static const struct component_master_ops komeda_master_ops = {
- .bind = komeda_bind,
- .unbind = komeda_unbind,
-};
-
-static void komeda_add_slave(struct device *master,
- struct component_match **match,
- struct device_node *np,
- u32 port, u32 endpoint)
-{
- struct device_node *remote;
-
- remote = of_graph_get_remote_node(np, port, endpoint);
- if (remote) {
- drm_of_component_match_add(master, match, component_compare_of, remote);
- of_node_put(remote);
- }
-}
-
-static int komeda_platform_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct component_match *match = NULL;
- struct device_node *child;
-
- if (!dev->of_node)
- return -ENODEV;
-
- for_each_available_child_of_node(dev->of_node, child) {
- if (of_node_cmp(child->name, "pipeline") != 0)
- continue;
-
- /* add connector */
- komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 0);
- komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 1);
- }
-
- return component_master_add_with_match(dev, &komeda_master_ops, match);
-}
-
-static int komeda_platform_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &komeda_master_ops);
- return 0;
-}
-
static const struct of_device_id komeda_of_match[] = {
{ .compatible = "arm,mali-d71", .data = d71_identify, },
{ .compatible = "arm,mali-d32", .data = d71_identify, },
@@ -189,7 +141,7 @@ static const struct dev_pm_ops komeda_pm_ops = {
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
- .remove = komeda_platform_remove,
+ .remove_new = komeda_platform_remove,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 62dc64550793..9299026701f3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -4,7 +4,6 @@
* Author: James.Qian.Wang <[email protected]>
*
*/
-#include <linux/component.h>
#include <linux/interrupt.h>
#include <drm/drm_atomic.h>
@@ -305,17 +304,13 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
if (err)
goto cleanup_mode_config;
- err = component_bind_all(mdev->dev, kms);
- if (err)
- goto cleanup_mode_config;
-
drm_mode_config_reset(drm);
err = devm_request_irq(drm->dev, mdev->irq,
komeda_kms_irq_handler, IRQF_SHARED,
drm->driver->name, drm);
if (err)
- goto free_component_binding;
+ goto cleanup_mode_config;
drm_kms_helper_poll_init(drm);
@@ -327,8 +322,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
free_interrupts:
drm_kms_helper_poll_fini(drm);
-free_component_binding:
- component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
@@ -339,12 +332,10 @@ cleanup_mode_config:
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
struct drm_device *drm = &kms->base;
- struct komeda_dev *mdev = drm->dev_private;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
- component_unbind_all(mdev->dev, drm);
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 3a872c292091..6ef655326357 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -84,6 +84,9 @@ struct komeda_crtc {
/** @disable_done: this flip_done is for tracing the disable */
struct completion *disable_done;
+
+ /** @encoder: encoder at the end of the pipeline */
+ struct drm_encoder encoder;
};
/**
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 12f5a2c7f03d..aa06f9838015 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -367,10 +367,9 @@ static int hdlcd_probe(struct platform_device *pdev)
match);
}
-static int hdlcd_remove(struct platform_device *pdev)
+static void hdlcd_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &hdlcd_master_ops);
- return 0;
}
static const struct of_device_id hdlcd_of_match[] = {
@@ -399,7 +398,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
static struct platform_driver hdlcd_platform_driver = {
.probe = hdlcd_probe,
- .remove = hdlcd_remove,
+ .remove_new = hdlcd_remove,
.driver = {
.name = "hdlcd",
.pm = &hdlcd_pm_ops,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index c03cfd57b752..62329d5dd992 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
@@ -935,10 +936,9 @@ static int malidp_platform_probe(struct platform_device *pdev)
match);
}
-static int malidp_platform_remove(struct platform_device *pdev)
+static void malidp_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &malidp_master_ops);
- return 0;
}
static int __maybe_unused malidp_pm_suspend(struct device *dev)
@@ -981,7 +981,7 @@ static const struct dev_pm_ops malidp_pm_ops = {
static struct platform_driver malidp_platform_driver = {
.probe = malidp_platform_probe,
- .remove = malidp_platform_remove,
+ .remove_new = malidp_platform_remove,
.driver = {
.name = "mali-dp",
.pm = &malidp_pm_ops,
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index 5afade25e217..e5597d7c9ae1 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -3,7 +3,7 @@ config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM && MMU
select DRM_KMS_HELPER
- select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+ select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e120144d4b47..e8d2fe955909 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -37,8 +37,6 @@ static const struct drm_ioctl_desc armada_ioctls[] = {
DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static const struct drm_driver armada_drm_driver = {
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
.major = 1,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 3943e89cc06c..d223176912b6 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -34,7 +34,7 @@ static void armada_fbdev_fb_destroy(struct fb_info *info)
static const struct fb_ops armada_fb_ops = {
.owner = THIS_MODULE,
- FB_DEFAULT_IO_OPS,
+ FB_DEFAULT_IOMEM_OPS,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_destroy = armada_fbdev_fb_destroy,
};
@@ -209,10 +209,6 @@ void armada_fbdev_setup(struct drm_device *dev)
goto err_drm_client_init;
}
- ret = armada_fbdev_client_hotplug(&fbh->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&fbh->client);
return;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index f21eb8fb76d8..3b9bd8ecda13 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -4,6 +4,8 @@
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
+#include <linux/bitfield.h>
+
#include <drm/armada_drm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -445,8 +447,8 @@ static int armada_overlay_get_property(struct drm_plane *plane,
drm_to_overlay_state(state)->colorkey_ug,
drm_to_overlay_state(state)->colorkey_vb, 0);
} else if (property == priv->colorkey_mode_prop) {
- *val = (drm_to_overlay_state(state)->colorkey_mode &
- CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+ *val = FIELD_GET(CFG_CKMODE_MASK,
+ drm_to_overlay_state(state)->colorkey_mode);
} else if (property == priv->brightness_prop) {
*val = drm_to_overlay_state(state)->brightness + 256;
} else if (property == priv->contrast_prop) {
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index c8c7f8215155..d207b03f8357 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -351,20 +351,18 @@ err_unload:
return ret;
}
-static int aspeed_gfx_remove(struct platform_device *pdev)
+static void aspeed_gfx_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
-
- return 0;
}
static struct platform_driver aspeed_gfx_platform_driver = {
.probe = aspeed_gfx_probe,
- .remove = aspeed_gfx_remove,
+ .remove_new = aspeed_gfx_remove,
.driver = {
.name = "aspeed_gfx",
.of_match_table = aspeed_gfx_match,
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index fbb070f63e36..fdd9a493aa9c 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -7,6 +7,17 @@
#include <drm/drm_print.h>
#include "ast_drv.h"
+bool ast_astdp_is_connected(struct ast_device *ast)
+{
+ if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING))
+ return false;
+ if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))
+ return false;
+ if (!ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, ASTDP_LINK_SUCCESS))
+ return false;
+ return true;
+}
+
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_device *ast = to_ast_device(dev);
@@ -119,53 +130,32 @@ err_astdp_edid_not_ready:
/*
* Launch Aspeed DP
*/
-void ast_dp_launch(struct drm_device *dev, u8 bPower)
+void ast_dp_launch(struct drm_device *dev)
{
- u32 i = 0, j = 0, WaitCount = 1;
- u8 bDPTX = 0;
+ u32 i = 0;
u8 bDPExecute = 1;
-
struct ast_device *ast = to_ast_device(dev);
- // S3 come back, need more time to wait BMC ready.
- if (bPower)
- WaitCount = 300;
-
-
- // Wait total count by different condition.
- for (j = 0; j < WaitCount; j++) {
- bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK);
-
- if (bDPTX)
- break;
+ // Wait up to one second, then time out.
+ while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
+ ASTDP_MCU_FW_EXECUTING) {
+ i++;
+ // wait 100 ms
msleep(100);
- }
- // 0xE : ASTDP with DPMCU FW handling
- if (bDPTX == ASTDP_DPMCU_TX) {
- // Wait one second then timeout.
- i = 0;
-
- while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, COPROCESSOR_LAUNCH) !=
- COPROCESSOR_LAUNCH) {
- i++;
- // wait 100 ms
- msleep(100);
-
- if (i >= 10) {
- // DP would not be ready.
- bDPExecute = 0;
- break;
- }
+ if (i >= 10) {
+ // DP would not be ready.
+ bDPExecute = 0;
+ break;
}
+ }
- if (bDPExecute)
- ast->tx_chip_types |= BIT(AST_TX_ASTDP);
+ if (!bDPExecute)
+ drm_err(dev, "Wait DPMCU executing timeout\n");
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
- }
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
+ (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
+ ASTDP_HOST_EDID_READ_DONE);
}
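The rewritten ast_dp_launch() polls CRTC register 0xD1 for ASTDP_MCU_FW_EXECUTING in 100 ms steps and gives up after roughly one second. As a sketch only (the patch keeps the explicit loop), the same wait could be expressed with the generic read_poll_timeout() helper from <linux/iopoll.h>, reusing the register accessor from the hunks above:

#include <linux/iopoll.h>
#include "ast_drv.h"

static int example_wait_dpmcu(struct ast_device *ast)
{
	u8 val;

	/* sleep ~100 ms between reads, give up after one second */
	return read_poll_timeout(ast_get_index_reg_mask, val,
				 val == ASTDP_MCU_FW_EXECUTING,
				 100000, 1000000, false,
				 ast, AST_IO_CRTC_PORT, 0xD1,
				 ASTDP_MCU_FW_EXECUTING);
}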
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 1bc35a992369..f10d53b0c94f 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -272,11 +272,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
return true;
}
-bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+bool ast_dp501_is_connected(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
- u32 i, boot_address, offset, data;
- u32 *pEDIDidx;
+ u32 boot_address, offset, data;
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
@@ -292,14 +290,6 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
data = ast_mindwm(ast, boot_address + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
-
- /* Read EDID */
- offset = AST_DP501_EDID_DATA;
- for (i = 0; i < 128; i += 4) {
- data = ast_mindwm(ast, boot_address + offset + i);
- pEDIDidx = (u32 *)(ediddata + i);
- *pEDIDidx = data;
- }
} else {
if (!ast->dp501_fw_buf)
return false;
@@ -319,10 +309,33 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
data = readl(ast->dp501_fw_buf + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
+ }
+ return true;
+}
+
+bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
+{
+ struct ast_device *ast = to_ast_device(dev);
+ u32 i, boot_address, offset, data;
+ u32 *pEDIDidx;
+
+ if (!ast_dp501_is_connected(ast))
+ return false;
+
+ if (ast->config_mode == ast_use_p2a) {
+ boot_address = get_fw_base(ast);
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
+ data = ast_mindwm(ast, boot_address + offset + i);
+ pEDIDidx = (u32 *)(ediddata + i);
+ *pEDIDidx = data;
+ }
+ } else {
+ /* Read EDID */
+ offset = AST_DP501_EDID_DATA;
+ for (i = 0; i < 128; i += 4) {
data = readl(ast->dp501_fw_buf + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
@@ -350,7 +363,7 @@ static bool ast_init_dvo(struct drm_device *dev)
data |= 0x00000500;
ast_write32(ast, 0x12008, data);
- if (ast->chip == AST2300) {
+ if (IS_AST_GEN4(ast)) {
data = ast_read32(ast, 0x12084);
/* multi-pins for DVO single-edge */
data |= 0xfffe0000;
@@ -366,7 +379,7 @@ static bool ast_init_dvo(struct drm_device *dev)
data &= 0xffffffcf;
data |= 0x00000020;
ast_write32(ast, 0x12090, data);
- } else { /* AST2400 */
+ } else { /* AST GEN5+ */
data = ast_read32(ast, 0x12088);
/* multi-pins for DVO single-edge */
data |= 0x30000000;
@@ -437,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev)
struct ast_device *ast = to_ast_device(dev);
u8 jreg;
- if (ast->chip == AST2300 || ast->chip == AST2400) {
+ if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
switch (jreg & 0x0e) {
case 0x04:
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index a501169cddad..848a9f1403e8 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -52,19 +52,38 @@
#define PCI_CHIP_AST2000 0x2000
#define PCI_CHIP_AST2100 0x2010
+#define __AST_CHIP(__gen, __index) ((__gen) << 16 | (__index))
enum ast_chip {
- AST2000,
- AST2100,
- AST1100,
- AST2200,
- AST2150,
- AST2300,
- AST2400,
- AST2500,
- AST2600,
+ /* 1st gen */
+ AST1000 = __AST_CHIP(1, 0), // unused
+ AST2000 = __AST_CHIP(1, 1),
+ /* 2nd gen */
+ AST1100 = __AST_CHIP(2, 0),
+ AST2100 = __AST_CHIP(2, 1),
+ AST2050 = __AST_CHIP(2, 2), // unused
+ /* 3rd gen */
+ AST2200 = __AST_CHIP(3, 0),
+ AST2150 = __AST_CHIP(3, 1),
+ /* 4th gen */
+ AST2300 = __AST_CHIP(4, 0),
+ AST1300 = __AST_CHIP(4, 1),
+ AST1050 = __AST_CHIP(4, 2), // unused
+ /* 5th gen */
+ AST2400 = __AST_CHIP(5, 0),
+ AST1400 = __AST_CHIP(5, 1),
+ AST1250 = __AST_CHIP(5, 2), // unused
+ /* 6th gen */
+ AST2500 = __AST_CHIP(6, 0),
+ AST2510 = __AST_CHIP(6, 1),
+ AST2520 = __AST_CHIP(6, 2), // unused
+ /* 7th gen */
+ AST2600 = __AST_CHIP(7, 0),
+ AST2620 = __AST_CHIP(7, 1), // unused
};
+#define __AST_CHIP_GEN(__chip) (((unsigned long)(__chip)) >> 16)
+
enum ast_tx_chip {
AST_TX_NONE,
AST_TX_SIL164,
@@ -166,7 +185,6 @@ struct ast_device {
void __iomem *dp501_fw_buf;
enum ast_chip chip;
- bool vga2_clone;
uint32_t dram_bus_width;
uint32_t dram_type;
uint32_t mclk;
@@ -196,6 +214,10 @@ struct ast_device {
struct drm_encoder encoder;
struct drm_connector connector;
} astdp;
+ struct {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ } bmc;
} output;
bool support_wide_screen;
@@ -219,6 +241,24 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags);
+static inline unsigned long __ast_gen(struct ast_device *ast)
+{
+ return __AST_CHIP_GEN(ast->chip);
+}
+#define AST_GEN(__ast) __ast_gen(__ast)
+
+static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
+{
+ return __ast_gen(ast) == gen;
+}
+#define IS_AST_GEN1(__ast) __ast_gen_is_eq(__ast, 1)
+#define IS_AST_GEN2(__ast) __ast_gen_is_eq(__ast, 2)
+#define IS_AST_GEN3(__ast) __ast_gen_is_eq(__ast, 3)
+#define IS_AST_GEN4(__ast) __ast_gen_is_eq(__ast, 4)
+#define IS_AST_GEN5(__ast) __ast_gen_is_eq(__ast, 5)
+#define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6)
+#define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7)
+
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
#define AST_IO_VGA_ENABLE_PORT (0x43)
@@ -258,26 +298,35 @@ static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
iowrite8(val, ast->ioregs + reg);
}
-static inline void ast_set_index_reg(struct ast_device *ast,
- uint32_t base, uint8_t index,
- uint8_t val)
+static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index)
{
ast_io_write8(ast, base, index);
++base;
- ast_io_write8(ast, base, val);
+ return ast_io_read8(ast, base);
}
-void ast_set_index_reg_mask(struct ast_device *ast,
- uint32_t base, uint8_t index,
- uint8_t mask, uint8_t val);
-uint8_t ast_get_index_reg(struct ast_device *ast,
- uint32_t base, uint8_t index);
-uint8_t ast_get_index_reg_mask(struct ast_device *ast,
- uint32_t base, uint8_t index, uint8_t mask);
+static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
+ u8 preserve_mask)
+{
+ u8 val = ast_get_index_reg(ast, base, index);
+
+ return val & preserve_mask;
+}
-static inline void ast_open_key(struct ast_device *ast)
+static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val)
{
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
+ ast_io_write8(ast, base, index);
+ ++base;
+ ast_io_write8(ast, base, val);
+}
+
+static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
+ u8 preserve_mask, u8 val)
+{
+ u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask);
+
+ tmp |= val;
+ ast_set_index_reg(ast, base, index, tmp);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
@@ -350,9 +399,6 @@ int ast_mode_config_init(struct ast_device *ast);
#define AST_DP501_LINKRATE 0xf014
#define AST_DP501_EDID_DATA 0xf020
-/* Define for Soc scratched reg */
-#define COPROCESSOR_LAUNCH BIT(5)
-
/*
* Display Transmitter Type:
*/
@@ -461,9 +507,6 @@ int ast_mode_config_init(struct ast_device *ast);
int ast_mm_init(struct ast_device *ast);
/* ast post */
-void ast_enable_vga(struct drm_device *dev);
-void ast_enable_mmio(struct drm_device *dev);
-bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
@@ -471,6 +514,7 @@ void ast_patch_ahb_2500(struct ast_device *ast);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
+bool ast_dp501_is_connected(struct ast_device *ast);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
@@ -479,8 +523,9 @@ void ast_init_3rdtx(struct drm_device *dev);
struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
/* aspeed DP */
+bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev, u8 bPower);
+void ast_dp_launch(struct drm_device *dev);
void ast_dp_power_on_off(struct drm_device *dev, bool no);
void ast_dp_set_on_off(struct drm_device *dev, bool no);
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
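The reworked enum ast_chip packs the hardware generation into the upper 16 bits of each value, which is exactly what the IS_AST_GENx() helpers decode. A small sketch restating the arithmetic from the hunk above:

#define __AST_CHIP(__gen, __index)	((__gen) << 16 | (__index))
#define __AST_CHIP_GEN(__chip)		(((unsigned long)(__chip)) >> 16)

/*
 * AST2400 == __AST_CHIP(5, 0) == 0x00050000, so
 * __AST_CHIP_GEN(AST2400) == 5 and IS_AST_GEN5() reduces to a
 * comparison of that generation number, independent of the index bits.
 */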
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f32ce29edba7..dae365ed3969 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,131 +35,153 @@
#include "ast_drv.h"
-void ast_set_index_reg_mask(struct ast_device *ast,
- uint32_t base, uint8_t index,
- uint8_t mask, uint8_t val)
+static bool ast_is_vga_enabled(struct drm_device *dev)
{
- u8 tmp;
- ast_io_write8(ast, base, index);
- tmp = (ast_io_read8(ast, base + 1) & mask) | val;
- ast_set_index_reg(ast, base, index, tmp);
+ struct ast_device *ast = to_ast_device(dev);
+ u8 ch;
+
+ ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
+
+ return !!(ch & 0x01);
}
-uint8_t ast_get_index_reg(struct ast_device *ast,
- uint32_t base, uint8_t index)
+static void ast_enable_vga(struct drm_device *dev)
{
- uint8_t ret;
- ast_io_write8(ast, base, index);
- ret = ast_io_read8(ast, base + 1);
- return ret;
+ struct ast_device *ast = to_ast_device(dev);
+
+ ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
}
-uint8_t ast_get_index_reg_mask(struct ast_device *ast,
- uint32_t base, uint8_t index, uint8_t mask)
+/*
+ * Run this function as part of the HW device cleanup; not
+ * when the DRM device gets released.
+ */
+static void ast_enable_mmio_release(void *data)
{
- uint8_t ret;
- ast_io_write8(ast, base, index);
- ret = ast_io_read8(ast, base + 1) & mask;
- return ret;
+ struct ast_device *ast = data;
+
+ /* enable standard VGA decode */
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
-static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+static int ast_enable_mmio(struct ast_device *ast)
{
- struct device_node *np = dev->dev->of_node;
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+
+ return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
+}
+
+static void ast_open_key(struct ast_device *ast)
+{
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
+}
+
+static int ast_device_config_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- uint32_t data, jregd0, jregd1;
+ struct device_node *np = dev->dev->of_node;
+ uint32_t scu_rev = 0xffffffff;
+ u32 data;
+ u8 jregd0, jregd1;
+
+ /*
+ * Find configuration mode and read SCU revision
+ */
- /* Defaults */
ast->config_mode = ast_use_defaults;
- *scu_rev = 0xffffffff;
/* Check if we have device-tree properties */
- if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
- scu_rev)) {
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
/* We do, disable P2A access */
ast->config_mode = ast_use_dt;
- drm_info(dev, "Using device-tree for configuration\n");
- return;
- }
+ scu_rev = data;
+ } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
+ /*
+ * The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
+ * is disabled. We force using P2A if the VGA-only mode bit
+ * D[7] is set.
+ */
+ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+
+ /*
+ * We have a P2A bridge and it is enabled.
+ */
+
+ /* Patch AST2500/AST2510 */
+ if ((pdev->revision & 0xf0) == 0x40) {
+ if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
+ ast_patch_ahb_2500(ast);
+ }
- /* Not all families have a P2A bridge */
- if (pdev->device != PCI_CHIP_AST2000)
- return;
+ /* Double check that it's actually working */
+ data = ast_read32(ast, 0xf004);
+ if ((data != 0xffffffff) && (data != 0x00)) {
+ ast->config_mode = ast_use_p2a;
- /*
- * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
- * is disabled. We force using P2A if VGA only mode bit
- * is set D[7]
- */
- jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
- jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
- if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
- /* Patch AST2500 */
- if (((pdev->revision & 0xF0) == 0x40)
- && ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0))
- ast_patch_ahb_2500(ast);
-
- /* Double check it's actually working */
- data = ast_read32(ast, 0xf004);
- if ((data != 0xFFFFFFFF) && (data != 0x00)) {
- /* P2A works, grab silicon revision */
- ast->config_mode = ast_use_p2a;
-
- drm_info(dev, "Using P2A bridge for configuration\n");
-
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- *scu_rev = ast_read32(ast, 0x1207c);
- return;
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ scu_rev = ast_read32(ast, 0x1207c);
+ }
}
}
- /* We have a P2A bridge but it's disabled */
- drm_info(dev, "P2A bridge disabled, using default configuration\n");
-}
-
-static int ast_detect_chip(struct drm_device *dev, bool *need_post)
-{
- struct ast_device *ast = to_ast_device(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- uint32_t jreg, scu_rev;
+ switch (ast->config_mode) {
+ case ast_use_defaults:
+ drm_info(dev, "Using default configuration\n");
+ break;
+ case ast_use_dt:
+ drm_info(dev, "Using device-tree for configuration\n");
+ break;
+ case ast_use_p2a:
+ drm_info(dev, "Using P2A bridge for configuration\n");
+ break;
+ }
/*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail. We also inform
- * our caller that it needs to POST the chip
- * (Assumption: VGA not enabled -> need to POST)
+ * Identify chipset
*/
- if (!ast_is_vga_enabled(dev)) {
- ast_enable_vga(dev);
- drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
- *need_post = true;
- } else
- *need_post = false;
-
-
- /* Enable extended register access */
- ast_open_key(ast);
- ast_enable_mmio(dev);
-
- /* Find out whether P2A works or whether to use device-tree */
- ast_detect_config_mode(dev, &scu_rev);
- /* Identify chipset */
if (pdev->revision >= 0x50) {
ast->chip = AST2600;
drm_info(dev, "AST 2600 detected\n");
} else if (pdev->revision >= 0x40) {
- ast->chip = AST2500;
- drm_info(dev, "AST 2500 detected\n");
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ ast->chip = AST2510;
+ drm_info(dev, "AST 2510 detected\n");
+ break;
+ default:
+ ast->chip = AST2500;
+ drm_info(dev, "AST 2500 detected\n");
+ }
} else if (pdev->revision >= 0x30) {
- ast->chip = AST2400;
- drm_info(dev, "AST 2400 detected\n");
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ ast->chip = AST1400;
+ drm_info(dev, "AST 1400 detected\n");
+ break;
+ default:
+ ast->chip = AST2400;
+ drm_info(dev, "AST 2400 detected\n");
+ }
} else if (pdev->revision >= 0x20) {
- ast->chip = AST2300;
- drm_info(dev, "AST 2300 detected\n");
+ switch (scu_rev & 0x300) {
+ case 0x0000:
+ ast->chip = AST1300;
+ drm_info(dev, "AST 1300 detected\n");
+ break;
+ default:
+ ast->chip = AST2300;
+ drm_info(dev, "AST 2300 detected\n");
+ break;
+ }
} else if (pdev->revision >= 0x10) {
switch (scu_rev & 0x0300) {
case 0x0200:
@@ -179,15 +201,21 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
drm_info(dev, "AST 2100 detected\n");
break;
}
- ast->vga2_clone = false;
} else {
ast->chip = AST2000;
drm_info(dev, "AST 2000 detected\n");
}
+ return 0;
+}
+
+static void ast_detect_widescreen(struct ast_device *ast)
+{
+ u8 jreg;
+
/* Check if we support wide screen */
- switch (ast->chip) {
- case AST2000:
+ switch (AST_GEN(ast)) {
+ case 1:
ast->support_wide_screen = false;
break;
default:
@@ -198,20 +226,23 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- if (ast->chip == AST2300 &&
- (scu_rev & 0x300) == 0x0) /* ast1300 */
+ if (ast->chip == AST1300)
ast->support_wide_screen = true;
- if (ast->chip == AST2400 &&
- (scu_rev & 0x300) == 0x100) /* ast1400 */
+ if (ast->chip == AST1400)
ast->support_wide_screen = true;
- if (ast->chip == AST2500 &&
- scu_rev == 0x100) /* ast2510 */
+ if (ast->chip == AST2510)
ast->support_wide_screen = true;
- if (ast->chip == AST2600) /* ast2600 */
+ if (IS_AST_GEN7(ast))
ast->support_wide_screen = true;
}
break;
}
+}
+
+static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
+{
+ struct drm_device *dev = &ast->base;
+ u8 jreg;
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip_types |= AST_TX_NONE_BIT;
@@ -224,15 +255,15 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
* is at power-on reset, otherwise we'll incorrectly "detect" a
* SIL164 when there is none.
*/
- if (!*need_post) {
+ if (!need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff);
if (jreg & 0x80)
ast->tx_chip_types = AST_TX_SIL164_BIT;
}
- if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) {
+ if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
/*
- * On AST2300 and 2400, look the configuration set by the SoC in
+ * On AST GEN4+, look at the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
@@ -254,8 +285,13 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
case 0x0c:
ast->tx_chip_types = AST_TX_DP501_BIT;
}
- } else if (ast->chip == AST2600)
- ast_dp_launch(&ast->base, 0);
+ } else if (IS_AST_GEN7(ast)) {
+ if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
+ ASTDP_DPMCU_TX) {
+ ast->tx_chip_types = AST_TX_ASTDP_BIT;
+ ast_dp_launch(&ast->base);
+ }
+ }
/* Print stuff for diagnostic purposes */
if (ast->tx_chip_types & AST_TX_NONE_BIT)
@@ -264,8 +300,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
drm_info(dev, "Using Sil164 TMDS transmitter\n");
if (ast->tx_chip_types & AST_TX_DP501_BIT)
drm_info(dev, "Using DP501 DisplayPort transmitter\n");
-
- return 0;
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+ drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
}
static int ast_get_dram_info(struct drm_device *dev)
@@ -279,7 +315,7 @@ static int ast_get_dram_info(struct drm_device *dev)
case ast_use_dt:
/*
* If some properties are missing, use reasonable
- * defaults for AST2400
+ * defaults for GEN5
*/
if (of_property_read_u32(np, "aspeed,mcr-configuration",
&mcr_cfg))
@@ -302,7 +338,7 @@ static int ast_get_dram_info(struct drm_device *dev)
default:
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
- if (ast->chip == AST2500)
+ if (IS_AST_GEN6(ast))
ast->mclk = 800;
else
ast->mclk = 396;
@@ -314,7 +350,7 @@ static int ast_get_dram_info(struct drm_device *dev)
else
ast->dram_bus_width = 32;
- if (ast->chip == AST2500) {
+ if (IS_AST_GEN6(ast)) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_1Gx16;
@@ -330,7 +366,7 @@ static int ast_get_dram_info(struct drm_device *dev)
ast->dram_type = AST_DRAM_8Gx16;
break;
}
- } else if (ast->chip == AST2300 || ast->chip == AST2400) {
+ } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
@@ -388,25 +424,13 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-/*
- * Run this function as part of the HW device cleanup; not
- * when the DRM device gets released.
- */
-static void ast_device_release(void *data)
-{
- struct ast_device *ast = data;
-
- /* enable standard VGA decode */
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
-}
-
struct ast_device *ast_device_create(const struct drm_driver *drv,
struct pci_dev *pdev,
unsigned long flags)
{
struct drm_device *dev;
struct ast_device *ast;
- bool need_post;
+ bool need_post = false;
int ret = 0;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
@@ -442,7 +466,30 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
return ERR_PTR(-EIO);
}
- ast_detect_chip(dev, &need_post);
+ if (!ast_is_vga_enabled(dev)) {
+ drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
+ need_post = true;
+ }
+
+ /*
+ * If VGA isn't enabled, we need to enable now or subsequent
+ * access to the scratch registers will fail.
+ */
+ if (need_post)
+ ast_enable_vga(dev);
+
+ /* Enable extended register access */
+ ast_open_key(ast);
+ ret = ast_enable_mmio(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_device_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_detect_widescreen(ast);
+ ast_detect_tx_chip(ast, need_post);
ret = ast_get_dram_info(dev);
if (ret)
@@ -470,9 +517,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast);
- if (ret)
- return ERR_PTR(ret);
-
return ast;
}
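ast_device_create() now registers the MMIO-disable action with devm_add_action_or_reset() in the same step that enables MMIO, replacing the old separate ast_device_release() hook. A sketch of that devm pattern, reusing the register writes from the hunk above (example_* names are placeholders):

#include <linux/device.h>
#include "ast_drv.h"

static void example_mmio_disable(void *data)
{
	struct ast_device *ast = data;

	/* undo the enable below: fall back to standard VGA decode */
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}

static int example_mmio_enable(struct ast_device *ast)
{
	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);

	/* runs example_mmio_disable() on unbind, or immediately on failure */
	return devm_add_action_or_reset(ast->base.dev, example_mmio_disable, ast);
}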
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index e16af60deef9..bc174bd933b9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -38,8 +38,6 @@ static u32 ast_get_vram_size(struct ast_device *ast)
u8 jreg;
u32 vram_size;
- ast_open_key(ast);
-
vram_size = AST_VIDMEM_DEFAULT_SIZE;
jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
switch (jreg & 3) {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 36374828f6c8..32f04ec6c386 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -342,7 +342,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
- if ((ast->chip == AST2500 || ast->chip == AST2600) &&
+ if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
(vbios_mode->enh_table->flags & AST2500PreCatchCRT))
precache = 40;
@@ -384,7 +384,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
// Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels);
- if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080))
+ if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02);
else
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00);
@@ -466,7 +466,7 @@ static void ast_set_dclk_reg(struct ast_device *ast,
{
const struct ast_vbios_dclk_info *clk_info;
- if ((ast->chip == AST2500) || (ast->chip == AST2600))
+ if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
else
clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
@@ -510,17 +510,13 @@ static void ast_set_color_reg(struct ast_device *ast,
static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
- if (ast->chip == AST2600) {
+ if (IS_AST_GEN7(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
- } else if (ast->chip == AST2300 || ast->chip == AST2400 ||
- ast->chip == AST2500) {
+ } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
- } else if (ast->chip == AST2100 ||
- ast->chip == AST1100 ||
- ast->chip == AST2200 ||
- ast->chip == AST2150) {
+ } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
} else {
@@ -1082,9 +1078,10 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
return MODE_OK;
- if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
- (ast->chip == AST2300) || (ast->chip == AST2400) ||
- (ast->chip == AST2500) || (ast->chip == AST2600)) {
+ if ((ast->chip == AST2100) || // GEN2, but not AST1100 (?)
+ (ast->chip == AST2200) || // GEN3, but not AST2150 (?)
+ IS_AST_GEN4(ast) || IS_AST_GEN5(ast) ||
+ IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) {
if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080))
return MODE_OK;
@@ -1585,8 +1582,20 @@ err_drm_connector_update_edid_property:
return 0;
}
+static int ast_dp501_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_device *ast = to_ast_device(connector->dev);
+
+ if (ast_dp501_is_connected(ast))
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = {
.get_modes = ast_dp501_connector_helper_get_modes,
+ .detect_ctx = ast_dp501_connector_helper_detect_ctx,
};
static const struct drm_connector_funcs ast_dp501_connector_funcs = {
@@ -1611,7 +1620,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1647,6 +1656,8 @@ static int ast_dp501_output_init(struct ast_device *ast)
static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
{
void *edid;
+ struct drm_device *dev = connector->dev;
+ struct ast_device *ast = to_ast_device(dev);
int succ;
int count;
@@ -1655,9 +1666,17 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
if (!edid)
goto err_drm_connector_update_edid_property;
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&ast->ioregs_lock);
+
succ = ast_astdp_read_edid(connector->dev, edid);
if (succ < 0)
- goto err_kfree;
+ goto err_mutex_unlock;
+
+ mutex_unlock(&ast->ioregs_lock);
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
@@ -1665,15 +1684,28 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
return count;
-err_kfree:
+err_mutex_unlock:
+ mutex_unlock(&ast->ioregs_lock);
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
}
+static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct ast_device *ast = to_ast_device(connector->dev);
+
+ if (ast_astdp_is_connected(ast))
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
.get_modes = ast_astdp_connector_helper_get_modes,
+ .detect_ctx = ast_astdp_connector_helper_detect_ctx,
};
static const struct drm_connector_funcs ast_astdp_connector_funcs = {
@@ -1698,7 +1730,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
@@ -1728,6 +1760,60 @@ static int ast_astdp_output_init(struct ast_device *ast)
}
/*
+ * BMC virtual Connector
+ */
+
+static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
+{
+ return drm_add_modes_noedid(connector, 4096, 4096);
+}
+
+static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
+ .get_modes = ast_bmc_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs ast_bmc_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ast_bmc_output_init(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
+ struct drm_encoder *encoder = &ast->output.bmc.encoder;
+ struct drm_connector *connector = &ast->output.bmc.connector;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder,
+ &ast_bmc_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, "ast_bmc");
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
* Mode config
*/
@@ -1789,12 +1875,12 @@ int ast_mode_config_init(struct ast_device *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
- if (ast->chip == AST2100 ||
- ast->chip == AST2200 ||
- ast->chip == AST2300 ||
- ast->chip == AST2400 ||
- ast->chip == AST2500 ||
- ast->chip == AST2600) {
+ if (ast->chip == AST2100 || // GEN2, but not AST1100 (?)
+ ast->chip == AST2200 || // GEN3, but not AST2150 (?)
+ IS_AST_GEN7(ast) ||
+ IS_AST_GEN6(ast) ||
+ IS_AST_GEN5(ast) ||
+ IS_AST_GEN4(ast)) {
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 2048;
} else {
@@ -1834,8 +1920,13 @@ int ast_mode_config_init(struct ast_device *ast)
if (ret)
return ret;
}
+ ret = ast_bmc_output_init(ast);
+ if (ret)
+ return ret;
drm_mode_config_reset(dev);
+ drm_kms_helper_poll_init(dev);
+
return 0;
}
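The DP501 and ASTDP connectors above now advertise both connect and disconnect polling, and ast_mode_config_init() starts the poll worker via drm_kms_helper_poll_init(); that worker periodically calls each connector's .detect_ctx hook. A condensed sketch of how those pieces relate (example_* names are placeholders, not the driver's functions):

#include <drm/drm_connector.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_probe_helper.h>

static int example_connector_init(struct drm_connector *connector)
{
	/* let the poll worker notice both plug and unplug events */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;
	return 0;
}

static int example_mode_config_init(struct drm_device *dev)
{
	/* CRTCs, encoders and connectors are created before this point */
	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);	/* start periodic .detect_ctx polling */
	return 0;
}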
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 71bb36b865fd..13e15173f2c5 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -37,41 +37,13 @@
static void ast_post_chip_2300(struct drm_device *dev);
static void ast_post_chip_2500(struct drm_device *dev);
-void ast_enable_vga(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
-
- ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
- ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
-}
-
-void ast_enable_mmio(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
-}
-
-
-bool ast_is_vga_enabled(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
- u8 ch;
-
- ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
-
- return !!(ch & 0x01);
-}
-
static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
-static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -79,13 +51,9 @@ ast_set_def_ext_reg(struct drm_device *dev)
for (i = 0x81; i <= 0x9f; i++)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
- if (ast->chip == AST2300 || ast->chip == AST2400 ||
- ast->chip == AST2500) {
- if (pdev->revision >= 0x20)
- ext_reg_info = extreginfo_ast2300;
- else
- ext_reg_info = extreginfo_ast2300a0;
- } else
+ if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
+ ext_reg_info = extreginfo_ast2300;
+ else
ext_reg_info = extreginfo;
index = 0xa0;
@@ -104,8 +72,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
/* Enable RAMDAC for A1 */
reg = 0x04;
- if (ast->chip == AST2300 || ast->chip == AST2400 ||
- ast->chip == AST2500)
+ if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast))
reg |= 0x20;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
@@ -281,7 +248,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
if ((j & 0x80) == 0) { /* VGA only */
- if (ast->chip == AST2000) {
+ if (IS_AST_GEN1(ast)) {
dram_reg_info = ast2000_dram_table_data;
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
@@ -290,8 +257,8 @@ static void ast_init_dram_reg(struct drm_device *dev)
do {
;
} while (ast_read32(ast, 0x10100) != 0xa8);
- } else {/* AST2100/1100 */
- if (ast->chip == AST2100 || ast->chip == 2200)
+ } else { /* GEN2/GEN3 */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
dram_reg_info = ast2100_dram_table_data;
else
dram_reg_info = ast1100_dram_table_data;
@@ -313,7 +280,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
if (dram_reg_info->index == 0xff00) {/* delay fn */
for (i = 0; i < 15; i++)
udelay(dram_reg_info->data);
- } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+ } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) {
data = dram_reg_info->data;
if (ast->dram_type == AST_DRAM_1Gx16)
data = 0x00000d89;
@@ -339,15 +306,13 @@ static void ast_init_dram_reg(struct drm_device *dev)
cbrdlli_ast2150(ast, 32); /* 32 bits */
}
- switch (ast->chip) {
- case AST2000:
+ switch (AST_GEN(ast)) {
+ case 1:
temp = ast_read32(ast, 0x10140);
ast_write32(ast, 0x10140, temp | 0x40);
break;
- case AST1100:
- case AST2100:
- case AST2200:
- case AST2150:
+ case 2:
+ case 3:
temp = ast_read32(ast, 0x1200c);
ast_write32(ast, 0x1200c, temp & 0xfffffffd);
temp = ast_read32(ast, 0x12040);
@@ -367,24 +332,16 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- u32 reg;
-
- pci_read_config_dword(pdev, 0x04, &reg);
- reg |= 0x3;
- pci_write_config_dword(pdev, 0x04, reg);
- ast_enable_vga(dev);
- ast_open_key(ast);
- ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2600) {
- ast_dp_launch(dev, 1);
+ if (IS_AST_GEN7(ast)) {
+ if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+ ast_dp_launch(dev);
} else if (ast->config_mode == ast_use_p2a) {
- if (ast->chip == AST2500)
+ if (IS_AST_GEN6(ast))
ast_post_chip_2500(dev);
- else if (ast->chip == AST2300 || ast->chip == AST2400)
+ else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
ast_post_chip_2300(dev);
else
ast_init_dram_reg(dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 58184cd6ab0b..cc5cf4c2faf7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
struct drm_display_mode *adj = &c->state->adjusted_mode;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_connector *connector = NULL;
struct atmel_hlcdc_crtc_state *state;
+ struct drm_device *ddev = c->dev;
+ struct drm_connector_list_iter iter;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
@@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned int cfg = 0;
int div, ret;
+ /* get encoder from crtc */
+ drm_for_each_encoder(en_iter, ddev) {
+ if (en_iter->crtc == c) {
+ encoder = en_iter;
+ break;
+ }
+ }
+
+ if (encoder) {
+ /* Get the connector from encoder */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ if (connector->encoder == encoder)
+ break;
+ drm_connector_list_iter_end(&iter);
+ }
+
ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
if (ret)
return;
@@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg |= ATMEL_HLCDC_CLKDIV(div);
+ if (connector &&
+ connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ cfg |= ATMEL_HLCDC_CLKPOL;
+
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);
state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
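The atmel-hlcdc hunk walks from the CRTC to its encoder and connector so it can honour the display's pixel-data sampling edge: when the connector's display_info.bus_flags ask for data driven on the falling clock edge, the controller sets its clock-polarity bit (ATMEL_HLCDC_CLKPOL). A small sketch of the flag check in isolation:

#include <drm/drm_connector.h>

static bool example_wants_negedge(const struct drm_connector *connector)
{
	/* the panel or bridge wants pixel data driven on the falling edge */
	return connector->display_info.bus_flags &
	       DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
}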
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 29603561d501..fa0f9a93d50d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -773,15 +773,13 @@ err_put:
return ret;
}
-static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
+static void atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
drm_dev_unregister(ddev);
atmel_hlcdc_dc_unload(ddev);
drm_dev_put(ddev);
-
- return 0;
}
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
@@ -826,7 +824,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.probe = atmel_hlcdc_dc_drm_probe,
- .remove = atmel_hlcdc_dc_drm_remove,
+ .remove_new = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
.pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 82c68b042444..44a660a4bdbf 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -74,19 +74,19 @@ config DRM_FSL_LDB
Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
config DRM_ITE_IT6505
- tristate "ITE IT6505 DisplayPort bridge"
- depends on OF
+ tristate "ITE IT6505 DisplayPort bridge"
+ depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_DP_AUX_BUS
- select DRM_KMS_HELPER
- select DRM_DP_HELPER
- select EXTCON
- select CRYPTO
- select CRYPTO_HASH
- help
- ITE IT6505 DisplayPort bridge chip driver.
+ select DRM_DP_AUX_BUS
+ select DRM_KMS_HELPER
+ select DRM_DP_HELPER
+ select EXTCON
+ select CRYPTO
+ select CRYPTO_HASH
+ help
+ ITE IT6505 DisplayPort bridge chip driver.
config DRM_LONTIUM_LT8912B
tristate "Lontium LT8912B DSI/HDMI bridge"
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 99964f5a5457..2a6b91f752cb 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -7,7 +7,6 @@
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 2254457ab5d0..2611afd2c1c1 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -9,7 +9,7 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <media/cec.h>
@@ -786,8 +786,13 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
- regmap_update_bits(adv7511->regmap, 0xfb,
- 0x6, low_refresh_rate << 1);
+ if (adv7511->type == ADV7511)
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ else
+ regmap_update_bits(adv7511->regmap, 0x4a,
+ 0xc, low_refresh_rate << 2);
+
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
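The adv7511 hunk writes the low-refresh-rate field to a chip-specific location: bits [2:1] of register 0xfb on the ADV7511 itself, bits [3:2] of register 0x4a on the other variants handled by this driver. regmap_update_bits() performs the read-modify-write, touching only the bits covered by the mask; a sketch using the same registers as the hunk:

#include <linux/regmap.h>

static int example_set_low_refresh(struct regmap *map, bool is_adv7511,
				   unsigned int rate)
{
	if (is_adv7511)
		/* ADV7511: field lives in bits [2:1] of register 0xfb */
		return regmap_update_bits(map, 0xfb, 0x6, rate << 1);

	/* other variants: field lives in bits [3:2] of register 0x4a */
	return regmap_update_bits(map, 0x4a, 0xc, rate << 2);
}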
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 72ab2ab77081..c9e35731e6a1 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -813,7 +813,7 @@ MODULE_DEVICE_TABLE(of, anx6345_match_table);
static struct i2c_driver anx6345_driver = {
.driver = {
.name = "anx6345",
- .of_match_table = of_match_ptr(anx6345_match_table),
+ .of_match_table = anx6345_match_table,
},
.probe = anx6345_i2c_probe,
.remove = anx6345_i2c_remove,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 06a3e3243e19..800555aef97f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1373,7 +1373,6 @@ static const struct i2c_device_id anx78xx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, anx78xx_id);
-#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id anx78xx_match_table[] = {
{ .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
{ .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
@@ -1382,12 +1381,11 @@ static const struct of_device_id anx78xx_match_table[] = {
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, anx78xx_match_table);
-#endif
static struct i2c_driver anx78xx_driver = {
.driver = {
.name = "anx7814",
- .of_match_table = of_match_ptr(anx78xx_match_table),
+ .of_match_table = anx78xx_match_table,
},
.probe = anx78xx_i2c_probe,
.remove = anx78xx_i2c_remove,
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8b985efdc086..51abe42c639e 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -206,7 +206,7 @@ static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
static int wait_aux_op_finish(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int val;
int ret;
@@ -233,7 +233,7 @@ static int wait_aux_op_finish(struct anx7625_data *ctx)
static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address,
u8 len, u8 *buf)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
u8 addrh, addrm, addrl;
u8 cmd;
@@ -426,7 +426,7 @@ static int anx7625_odfc_config(struct anx7625_data *ctx,
u8 post_divider)
{
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Config input reference clock frequency 27MHz/19.2MHz */
ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
@@ -476,7 +476,7 @@ static int anx7625_set_k_value(struct anx7625_data *ctx)
static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
unsigned long m, n;
u16 htotal;
int ret;
@@ -574,7 +574,7 @@ static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx)
{
int val;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Swap MIPI-DSI data lane 3 P and N */
val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP);
@@ -591,7 +591,7 @@ static int anx7625_api_dsi_config(struct anx7625_data *ctx)
{
int val, ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Swap MIPI-DSI data lane 3 P and N */
ret = anx7625_swap_dsi_lane3(ctx);
@@ -656,7 +656,7 @@ static int anx7625_api_dsi_config(struct anx7625_data *ctx)
static int anx7625_dsi_config(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n");
@@ -688,7 +688,7 @@ static int anx7625_dsi_config(struct anx7625_data *ctx)
static int anx7625_api_dpi_config(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
u16 freq = ctx->dt.pixelclock.min / 1000;
int ret;
@@ -719,7 +719,7 @@ static int anx7625_api_dpi_config(struct anx7625_data *ctx)
static int anx7625_dpi_config(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n");
@@ -764,7 +764,7 @@ static int anx7625_read_flash_status(struct anx7625_data *ctx)
static int anx7625_hdcp_key_probe(struct anx7625_data *ctx)
{
int ret, val;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
u8 ident[FLASH_BUF_LEN];
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -814,7 +814,7 @@ static int anx7625_hdcp_key_probe(struct anx7625_data *ctx)
static int anx7625_hdcp_key_load(struct anx7625_data *ctx)
{
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Select HDCP 1.4 KEY */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -842,7 +842,7 @@ static int anx7625_hdcp_key_load(struct anx7625_data *ctx)
static int anx7625_hdcp_disable(struct anx7625_data *ctx)
{
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
dev_dbg(dev, "disable HDCP 1.4\n");
@@ -863,7 +863,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
{
u8 bcap;
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
ret = anx7625_hdcp_key_probe(ctx);
if (ret) {
@@ -872,11 +872,11 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
}
/* Read downstream capability */
- ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
+ ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap);
if (ret < 0)
return ret;
- if (!(bcap & 0x01)) {
+ if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) {
pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
return 0;
}
@@ -921,7 +921,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
static void anx7625_dp_start(struct anx7625_data *ctx)
{
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
u8 data;
if (!ctx->display_timing_valid) {
@@ -931,8 +931,8 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
dev_dbg(dev, "set downstream sink into normal\n");
/* Downstream sink enter into normal mode */
- data = 1;
- ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
+ data = DP_SET_POWER_D0;
+ ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
if (ret < 0)
dev_err(dev, "IO error : set sink into normal mode fail\n");
@@ -954,7 +954,7 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
static void anx7625_dp_stop(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
u8 data;
@@ -971,8 +971,8 @@ static void anx7625_dp_stop(struct anx7625_data *ctx)
dev_dbg(dev, "notify downstream enter into standby\n");
/* Downstream monitor enter into standby mode */
- data = 2;
- ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
+ data = DP_SET_POWER_D3;
+ ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
if (ret < 0)
DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
@@ -1019,7 +1019,7 @@ static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd)
static int sp_tx_get_edid_block(struct anx7625_data *ctx)
{
int c = 0;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
sp_tx_aux_wr(ctx, 0x7e);
sp_tx_aux_rd(ctx, 0x01);
@@ -1041,7 +1041,7 @@ static int edid_read(struct anx7625_data *ctx,
u8 offset, u8 *pblock_buf)
{
int ret, cnt;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
sp_tx_aux_wr(ctx, offset);
@@ -1072,7 +1072,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
{
u8 cnt;
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Write address only */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -1127,7 +1127,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
u8 i, j;
int g_edid_break = 0;
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Address initial */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
@@ -1234,7 +1234,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
static void anx7625_power_on(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret, i;
if (!ctx->pdata.low_power_mode) {
@@ -1270,7 +1270,7 @@ reg_err:
static void anx7625_power_standby(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
if (!ctx->pdata.low_power_mode) {
@@ -1300,7 +1300,7 @@ static void anx7625_config(struct anx7625_data *ctx)
static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
/* Reset main ocm */
@@ -1320,7 +1320,7 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
{
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Check interface workable */
ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
@@ -1366,7 +1366,7 @@ static void anx7625_power_on_init(struct anx7625_data *ctx)
static void anx7625_init_gpio(struct anx7625_data *platform)
{
- struct device *dev = &platform->client->dev;
+ struct device *dev = platform->dev;
DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");
@@ -1406,7 +1406,7 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx)
static void anx7625_start_dp_work(struct anx7625_data *ctx)
{
int ret;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
if (ctx->hpd_high_cnt >= 2) {
DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
@@ -1458,7 +1458,7 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
unsigned long wait_us)
{
int ret, val;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* Interrupt mode, no need poll HPD status, just return */
if (ctx->pdata.intp_irq)
@@ -1492,7 +1492,7 @@ static int anx7625_wait_hpd_asserted(struct drm_dp_aux *aux,
unsigned long wait_us)
{
struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
pm_runtime_get_sync(dev);
@@ -1525,7 +1525,7 @@ static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
/* HPD changed */
DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n",
@@ -1545,7 +1545,7 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
{
int intr_vector, status;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
INTR_ALERT_1, 0xFF);
@@ -1593,18 +1593,20 @@ static void anx7625_work_func(struct work_struct *work)
mutex_lock(&ctx->lock);
- if (pm_runtime_suspended(&ctx->client->dev))
- goto unlock;
+ if (pm_runtime_suspended(ctx->dev)) {
+ mutex_unlock(&ctx->lock);
+ return;
+ }
event = anx7625_hpd_change_detect(ctx);
+
+ mutex_unlock(&ctx->lock);
+
if (event < 0)
- goto unlock;
+ return;
if (ctx->bridge_attached)
drm_helper_hpd_irq_event(ctx->bridge.dev);
-
-unlock:
- mutex_unlock(&ctx->lock);
}
static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
@@ -1735,7 +1737,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
u8 request = msg->request & ~DP_AUX_I2C_MOT;
int ret = 0;
@@ -1761,7 +1763,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
struct s_edid_data *p_edid = &ctx->slimport_edid_p;
int edid_num;
u8 *edid;
@@ -1797,7 +1799,7 @@ static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
@@ -2006,7 +2008,7 @@ static const struct hdmi_codec_ops anx7625_codec_ops = {
static void anx7625_unregister_audio(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
if (ctx->audio_pdev) {
platform_device_unregister(ctx->audio_pdev);
@@ -2042,7 +2044,7 @@ static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx)
static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
{
struct mipi_dsi_device *dsi;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
struct mipi_dsi_host *host;
const struct mipi_dsi_device_info info = {
.type = "anx7625",
@@ -2076,7 +2078,7 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
static int anx7625_attach_dsi(struct anx7625_data *ctx)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int ret;
DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
@@ -2102,7 +2104,7 @@ static void hdcp_check_work_func(struct work_struct *work)
dwork = to_delayed_work(work);
ctx = container_of(dwork, struct anx7625_data, hdcp_work);
- dev = &ctx->client->dev;
+ dev = ctx->dev;
if (!ctx->connector) {
dev_err(dev, "HDCP connector is null!");
@@ -2129,7 +2131,7 @@ static void hdcp_check_work_func(struct work_struct *work)
static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
struct drm_connector_state *state)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
int cp;
dev_dbg(dev, "hdcp state check\n");
@@ -2174,7 +2176,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
int err;
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n");
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
@@ -2218,7 +2220,7 @@ anx7625_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n");
@@ -2239,7 +2241,7 @@ static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");
@@ -2285,7 +2287,7 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
struct drm_display_mode *adj)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
u32 hsync, hfp, hbp, hblanking;
u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
u32 vref, adj_clock;
@@ -2403,7 +2405,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_connector_state *conn_state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
dev_dbg(dev, "drm bridge atomic check\n");
@@ -2417,7 +2419,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *state)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
struct drm_connector *connector;
dev_dbg(dev, "drm atomic enable\n");
@@ -2444,7 +2446,7 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
dev_dbg(dev, "drm atomic disable\n");
@@ -2458,7 +2460,7 @@ static enum drm_connector_status
anx7625_bridge_detect(struct drm_bridge *bridge)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
@@ -2469,7 +2471,7 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
@@ -2494,7 +2496,7 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
struct i2c_client *client)
{
- struct device *dev = &ctx->client->dev;
+ struct device *dev = ctx->dev;
ctx->i2c.tx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter,
TX_P0_ADDR >> 1);
@@ -2629,7 +2631,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
pdata = &platform->pdata;
- platform->client = client;
+ platform->dev = &client->dev;
i2c_set_clientdata(client, platform);
pdata->supplies[0].supply = "vdd10";
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 14f33d6be289..5af819611ebc 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -458,7 +458,7 @@ struct anx7625_data {
int hdcp_cp;
/* Lock for work queue */
struct mutex lock;
- struct i2c_client *client;
+ struct device *dev;
struct anx7625_i2c_client i2c;
struct i2c_client *last_client;
struct timer_list hdcp_timer;
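
The anx7625 (and later it6505) conversion above swaps the stored struct i2c_client pointer for a plain struct device pointer, so helpers stop dereferencing &ctx->client->dev everywhere. A minimal sketch of that pattern, not taken from this patch and using hypothetical my_bridge naming:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct my_bridge {				/* hypothetical driver data */
	struct device *dev;			/* was: struct i2c_client *client */
};

static void my_bridge_log_state(struct my_bridge *ctx)
{
	dev_dbg(ctx->dev, "state check\n");	/* no &ctx->client->dev needed */
}

static int my_bridge_probe(struct i2c_client *client)
{
	struct my_bridge *ctx;

	ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = &client->dev;		/* bind once at probe time */
	i2c_set_clientdata(client, ctx);
	return 0;
}
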
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index f50d65f54314..7457d38622b0 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -14,8 +14,7 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index f6822dfa3805..6af565ac307a 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -29,7 +29,6 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/platform_device.h>
@@ -54,6 +53,26 @@
#include "cdns-mhdp8546-hdcp.h"
#include "cdns-mhdp8546-j721e.h"
+static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ /* Enable SW event interrupts */
+ if (mhdp->bridge_attached)
+ writel(readl(mhdp->regs + CDNS_APB_INT_MASK) &
+ ~CDNS_APB_INT_MASK_SW_EVENT_INT,
+ mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ writel(readl(mhdp->regs + CDNS_APB_INT_MASK) |
+ CDNS_APB_INT_MASK_SW_EVENT_INT,
+ mhdp->regs + CDNS_APB_INT_MASK);
+}
+
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
int ret, empty;
@@ -749,9 +768,7 @@ static int cdns_mhdp_fw_activate(const struct firmware *fw,
* MHDP_HW_STOPPED happens only due to driver removal when
* bridge should already be detached.
*/
- if (mhdp->bridge_attached)
- writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
- mhdp->regs + CDNS_APB_INT_MASK);
+ cdns_mhdp_bridge_hpd_enable(&mhdp->bridge);
spin_unlock(&mhdp->start_lock);
@@ -1740,8 +1757,7 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
/* Enable SW event interrupts */
if (hw_ready)
- writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
- mhdp->regs + CDNS_APB_INT_MASK);
+ cdns_mhdp_bridge_hpd_enable(bridge);
return 0;
aux_unregister:
@@ -2146,6 +2162,27 @@ cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
return &cdns_mhdp_state->base;
}
+static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = 1;
+ input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;
+
+ return input_fmts;
+}
+
static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -2165,6 +2202,13 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
return -EINVAL;
}
+ /*
+ * Bus-flag negotiation might be supported in the future.
+ * For now, set the bus flags statically in atomic_check.
+ */
+ if (mhdp->info)
+ bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;
+
mutex_unlock(&mhdp->link_mutex);
return 0;
}
@@ -2184,23 +2228,6 @@ static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
return cdns_mhdp_get_edid(mhdp, connector);
}
-static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
-{
- struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
-
- /* Enable SW event interrupts */
- if (mhdp->bridge_attached)
- writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
- mhdp->regs + CDNS_APB_INT_MASK);
-}
-
-static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
-{
- struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
-
- writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
-}
-
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
.atomic_enable = cdns_mhdp_atomic_enable,
.atomic_disable = cdns_mhdp_atomic_disable,
@@ -2210,6 +2237,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
.atomic_reset = cdns_mhdp_bridge_atomic_reset,
+ .atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
.detect = cdns_mhdp_bridge_detect,
.get_edid = cdns_mhdp_bridge_get_edid,
.hpd_enable = cdns_mhdp_bridge_hpd_enable,
@@ -2529,8 +2557,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
- if (mhdp->info)
- mhdp->bridge.timings = mhdp->info->timings;
ret = phy_init(mhdp->phy);
if (ret) {
@@ -2617,7 +2643,7 @@ static const struct of_device_id mhdp_ids[] = {
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
{ .compatible = "ti,j721e-mhdp8546",
.data = &(const struct cdns_mhdp_platform_info) {
- .timings = &mhdp_ti_j721e_bridge_timings,
+ .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags,
.ops = &mhdp_ti_j721e_ops,
},
},
@@ -2629,7 +2655,7 @@ MODULE_DEVICE_TABLE(of, mhdp_ids);
static struct platform_driver mhdp_driver = {
.driver = {
.name = "cdns-mhdp8546",
- .of_match_table = of_match_ptr(mhdp_ids),
+ .of_match_table = mhdp_ids,
},
.probe = cdns_mhdp_probe,
.remove = cdns_mhdp_remove,
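
The new cdns_mhdp_bridge_hpd_enable()/_disable() helpers above replace the open-coded mask writes with a read-modify-write of the APB interrupt mask, so only the SW-event bit is touched. A small sketch of that masking pattern, with hypothetical register names:

#include <linux/bits.h>
#include <linux/io.h>

#define INT_MASK_REG	0x30			/* hypothetical offset */
#define INT_MASK_HPD	BIT(0)			/* hypothetical bit */

static void hpd_irq_enable(void __iomem *regs)
{
	/* clear the mask bit: 0 means the interrupt is delivered */
	writel(readl(regs + INT_MASK_REG) & ~INT_MASK_HPD,
	       regs + INT_MASK_REG);
}

static void hpd_irq_disable(void __iomem *regs)
{
	/* set the mask bit: 1 means the interrupt is masked */
	writel(readl(regs + INT_MASK_REG) | INT_MASK_HPD,
	       regs + INT_MASK_REG);
}
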
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
index bedddd510d17..bad2fc0c7306 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -336,7 +336,7 @@ struct cdns_mhdp_bridge_state {
};
struct cdns_mhdp_platform_info {
- const struct drm_bridge_timings *timings;
+ const u32 *input_bus_flags;
const struct mhdp_platform_ops *ops;
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
index dfe1b59514f7..12d04be4e242 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
@@ -71,8 +71,7 @@ const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
.disable = cdns_mhdp_j721e_disable,
};
-const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = {
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
- DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
- DRM_BUS_FLAG_DE_HIGH,
-};
+const u32
+mhdp_ti_j721e_bridge_input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_DE_HIGH;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
index 97d20d115a24..5ddca07a4255 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
@@ -14,6 +14,6 @@
struct mhdp_platform_ops;
extern const struct mhdp_platform_ops mhdp_ti_j721e_ops;
-extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings;
+extern const u32 mhdp_ti_j721e_bridge_input_bus_flags;
#endif /* !CDNS_MHDP8546_J721E_H */
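
With drm_bridge_timings gone, the J721E platform data now carries only a u32 of DRM_BUS_FLAG_* values, and the core applies it to the bridge state during atomic_check. A hedged sketch of that flow, assuming the platform info is reachable through the bridge's driver_private pointer (the real driver keeps it in its own device struct):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>

struct my_platform_info {			/* hypothetical platform data */
	const u32 *input_bus_flags;
};

static const u32 my_board_input_bus_flags =
	DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | DRM_BUS_FLAG_DE_HIGH;

static int my_bridge_atomic_check(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	const struct my_platform_info *info = bridge->driver_private;

	/* No runtime negotiation yet: apply the board's static flags. */
	if (info && info->input_bus_flags)
		bridge_state->input_bus_cfg.flags = *info->input_bus_flags;

	return 0;
}
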
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 8bfce21d6b90..d205e755e524 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -17,7 +17,7 @@
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index a854eb84e399..483c28c7fc99 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -607,7 +607,7 @@ static struct i2c_driver ch7033_driver = {
.remove = ch7033_remove,
.driver = {
.name = "ch7033",
- .of_match_table = of_match_ptr(ch7033_dt_ids),
+ .of_match_table = ch7033_dt_ids,
},
.id_table = ch7033_ids,
};
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index f7f436cf96e0..08bd5695ddae 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index b8e52156b07a..0e4bac7dd04f 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -8,7 +8,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index 386032a02599..21471a9a28b2 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -9,9 +9,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index c806576b1e22..7984da9c0a35 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 504d51c42f79..2f300f5ca051 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -404,7 +404,7 @@ struct debugfs_entries {
struct it6505 {
struct drm_dp_aux aux;
struct drm_bridge bridge;
- struct i2c_client *client;
+ struct device *dev;
struct it6505_drm_dp_link link;
struct it6505_platform_data pdata;
/*
@@ -524,7 +524,7 @@ static int it6505_read(struct it6505 *it6505, unsigned int reg_addr)
{
unsigned int value;
int err;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
if (!it6505->powered)
return -ENODEV;
@@ -542,7 +542,7 @@ static int it6505_write(struct it6505 *it6505, unsigned int reg_addr,
unsigned int reg_val)
{
int err;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
if (!it6505->powered)
return -ENODEV;
@@ -562,7 +562,7 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
unsigned int mask, unsigned int value)
{
int err;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
if (!it6505->powered)
return -ENODEV;
@@ -580,7 +580,7 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
static void it6505_debug_print(struct it6505 *it6505, unsigned int reg,
const char *prefix)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int val;
if (!drm_debug_enabled(DRM_UT_DRIVER))
@@ -599,7 +599,7 @@ static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset)
{
u8 value;
int ret;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value);
if (ret < 0) {
@@ -613,7 +613,7 @@ static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset,
u8 datain)
{
int ret;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain);
if (ret < 0) {
@@ -626,7 +626,7 @@ static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset,
static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num)
{
int ret;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num);
@@ -643,7 +643,7 @@ static void it6505_dump(struct it6505 *it6505)
{
unsigned int i, j;
u8 regs[16];
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
for (i = 0; i <= 0xff; i += 16) {
for (j = 0; j < 16; j++)
@@ -682,7 +682,7 @@ static int it6505_read_word(struct it6505 *it6505, unsigned int reg)
static void it6505_calc_video_info(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int hsync_pol, vsync_pol, interlaced;
int htotal, hdes, hdew, hfph, hsyncw;
int vtotal, vdes, vdew, vfph, vsyncw;
@@ -926,7 +926,7 @@ static int it6505_aux_wait(struct it6505 *it6505)
{
int status;
unsigned long timeout;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
@@ -1141,7 +1141,7 @@ static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
size_t len)
{
struct it6505 *it6505 = data;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
enum aux_cmd_reply reply;
int offset, ret, aux_retry = 100;
@@ -1201,7 +1201,7 @@ static int it6505_send_video_infoframe(struct it6505 *it6505,
{
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
int err;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
if (err < 0) {
@@ -1231,7 +1231,7 @@ static void it6505_get_extcon_property(struct it6505 *it6505)
{
int err;
union extcon_property_value property;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
if (it6505->extcon && !it6505->lane_swap_disabled) {
err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP,
@@ -1382,7 +1382,7 @@ static void it6505_enable_audio_source(struct it6505 *it6505)
static void it6505_enable_audio_infoframe(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F };
DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x",
@@ -1411,7 +1411,7 @@ static void it6505_disable_audio(struct it6505 *it6505)
static void it6505_enable_audio(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int regbe;
DRM_DEV_DEBUG_DRIVER(dev, "start");
@@ -1446,7 +1446,7 @@ static bool it6505_use_step_train_check(struct it6505 *it6505)
static void it6505_parse_link_capabilities(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
struct it6505_drm_dp_link *link = &it6505->link;
int bcaps;
@@ -1557,7 +1557,7 @@ static void it6505_lane_count_setup(struct it6505 *it6505)
static void it6505_link_training_setup(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
if (it6505->enable_enhanced_frame)
it6505_set_bits(it6505, REG_DATA_MUTE_CTRL,
@@ -1708,7 +1708,7 @@ it6505_step_cr_train(struct it6505 *it6505,
FORCE_CR_DONE);
return true;
}
- DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "cr not done");
+ DRM_DEV_DEBUG_DRIVER(it6505->dev, "cr not done");
if (it6505_check_max_voltage_swing_reached(lane_level_config,
it6505->lane_count))
@@ -1785,7 +1785,7 @@ it6505_step_eq_train(struct it6505 *it6505,
FORCE_EQ_DONE);
return true;
}
- DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "eq not done");
+ DRM_DEV_DEBUG_DRIVER(it6505->dev, "eq not done");
for (i = 0; i < it6505->lane_count; i++) {
lane_voltage_pre_emphasis->voltage_swing[i] =
@@ -1820,7 +1820,7 @@ static bool it6505_link_start_step_train(struct it6505 *it6505)
.pre_emphasis = { 0 },
};
- DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start");
+ DRM_DEV_DEBUG_DRIVER(it6505->dev, "start");
err = it6505_drm_dp_link_configure(it6505);
if (err < 0)
@@ -1854,7 +1854,7 @@ static void it6505_reset_hdcp(struct it6505 *it6505)
static void it6505_start_hdcp(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "start");
it6505_reset_hdcp(it6505);
@@ -1882,7 +1882,7 @@ static bool it6505_hdcp_is_ksv_valid(u8 *ksv)
static void it6505_hdcp_part1_auth(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
u8 hdcp_bcaps;
it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00);
@@ -1923,7 +1923,7 @@ static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
struct shash_desc *desc;
struct crypto_shash *tfm;
int err;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
@@ -1948,7 +1948,7 @@ static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
u8 binfo[2];
int down_stream_count, i, err, msg_count = 0;
@@ -2012,7 +2012,7 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
u8 av[5][4], bv[5][4];
int i, err;
@@ -2045,7 +2045,7 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
{
struct it6505 *it6505 = container_of(work, struct it6505,
hdcp_wait_ksv_list);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
unsigned int timeout = 5000;
u8 bstatus = 0;
bool ksv_list_check;
@@ -2087,7 +2087,7 @@ static void it6505_hdcp_work(struct work_struct *work)
{
struct it6505 *it6505 = container_of(work, struct it6505,
hdcp_work.work);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int ret;
u8 link_status[DP_LINK_STATUS_SIZE] = { 0 };
@@ -2128,7 +2128,7 @@ static void it6505_hdcp_work(struct work_struct *work)
static void it6505_show_hdcp_info(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int i;
u8 *sha1 = it6505->sha1_input;
@@ -2162,7 +2162,7 @@ static void it6505_stop_link_train(struct it6505 *it6505)
static void it6505_link_train_ok(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
it6505->link_state = LINK_OK;
/* disable mute, enable avi info frame */
@@ -2181,7 +2181,7 @@ static void it6505_link_train_ok(struct it6505 *it6505)
static void it6505_link_step_train_process(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int ret, i, step_retry = 3;
DRM_DEV_DEBUG_DRIVER(dev, "Start step train");
@@ -2219,7 +2219,7 @@ static void it6505_link_step_train_process(struct it6505 *it6505)
static void it6505_link_training_work(struct work_struct *work)
{
struct it6505 *it6505 = container_of(work, struct it6505, link_works);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int ret;
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d",
@@ -2267,7 +2267,7 @@ static void it6505_remove_edid(struct it6505 *it6505)
static int it6505_process_hpd_irq(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int ret, dpcd_sink_count, dp_irq_vector, bstatus;
u8 link_status[DP_LINK_STATUS_SIZE];
@@ -2331,7 +2331,7 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
static void it6505_irq_hpd(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int dp_sink_count;
it6505->hpd_state = it6505_get_sink_hpd_status(it6505);
@@ -2393,7 +2393,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
static void it6505_irq_hpd_irq(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt");
@@ -2403,7 +2403,7 @@ static void it6505_irq_hpd_irq(struct it6505 *it6505)
static void it6505_irq_scdt(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
bool data;
data = it6505_get_video_status(it6505);
@@ -2418,7 +2418,7 @@ static void it6505_irq_scdt(struct it6505 *it6505)
static void it6505_irq_hdcp_done(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt");
it6505->hdcp_status = HDCP_AUTH_DONE;
@@ -2427,7 +2427,7 @@ static void it6505_irq_hdcp_done(struct it6505 *it6505)
static void it6505_irq_hdcp_fail(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt");
it6505->hdcp_status = HDCP_AUTH_IDLE;
@@ -2437,14 +2437,14 @@ static void it6505_irq_hdcp_fail(struct it6505 *it6505)
static void it6505_irq_aux_cmd_fail(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt");
}
static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
schedule_work(&it6505->hdcp_wait_ksv_list);
@@ -2452,7 +2452,7 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
static void it6505_irq_audio_fifo_error(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt");
@@ -2462,7 +2462,7 @@ static void it6505_irq_audio_fifo_error(struct it6505 *it6505)
static void it6505_irq_link_train_fail(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt");
schedule_work(&it6505->link_works);
@@ -2470,7 +2470,7 @@ static void it6505_irq_link_train_fail(struct it6505 *it6505)
static void it6505_irq_video_fifo_error(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt");
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
@@ -2481,7 +2481,7 @@ static void it6505_irq_video_fifo_error(struct it6505 *it6505)
static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt");
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
@@ -2498,7 +2498,7 @@ static bool it6505_test_bit(unsigned int bit, const unsigned int *addr)
static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
{
struct it6505 *it6505 = data;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
static const struct {
int bit;
void (*handler)(struct it6505 *it6505);
@@ -2517,9 +2517,11 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
- if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+ if (it6505->enable_drv_hold || !it6505->powered)
return IRQ_HANDLED;
+ pm_runtime_get_sync(dev);
+
int_status[0] = it6505_read(it6505, INT_STATUS_01);
int_status[1] = it6505_read(it6505, INT_STATUS_02);
int_status[2] = it6505_read(it6505, INT_STATUS_03);
@@ -2550,7 +2552,7 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
static int it6505_poweron(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
struct it6505_platform_data *pdata = &it6505->pdata;
int err;
@@ -2599,7 +2601,7 @@ static int it6505_poweron(struct it6505 *it6505)
static int it6505_poweroff(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
struct it6505_platform_data *pdata = &it6505->pdata;
int err;
@@ -2633,7 +2635,7 @@ static int it6505_poweroff(struct it6505 *it6505)
static enum drm_connector_status it6505_detect(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
enum drm_connector_status status = connector_status_disconnected;
int dp_sink_count;
@@ -2694,7 +2696,7 @@ static int it6505_extcon_notifier(struct notifier_block *self,
static void it6505_extcon_work(struct work_struct *work)
{
struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int state, ret;
if (it6505->enable_drv_hold)
@@ -2739,11 +2741,11 @@ unlock:
static int it6505_use_notifier_module(struct it6505 *it6505)
{
int ret;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
it6505->event_nb.notifier_call = it6505_extcon_notifier;
INIT_WORK(&it6505->extcon_wq, it6505_extcon_work);
- ret = devm_extcon_register_notifier(&it6505->client->dev,
+ ret = devm_extcon_register_notifier(it6505->dev,
it6505->extcon, EXTCON_DISP_DP,
&it6505->event_nb);
if (ret) {
@@ -2759,7 +2761,7 @@ static int it6505_use_notifier_module(struct it6505 *it6505)
static void it6505_remove_notifier_module(struct it6505 *it6505)
{
if (it6505->extcon) {
- devm_extcon_unregister_notifier(&it6505->client->dev,
+ devm_extcon_unregister_notifier(it6505->dev,
it6505->extcon, EXTCON_DISP_DP,
&it6505->event_nb);
@@ -2772,7 +2774,7 @@ static void __maybe_unused it6505_delayed_audio(struct work_struct *work)
struct it6505 *it6505 = container_of(work, struct it6505,
delayed_audio.work);
- DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start");
+ DRM_DEV_DEBUG_DRIVER(it6505->dev, "start");
if (!it6505->powered)
return;
@@ -2785,7 +2787,7 @@ static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505,
struct hdmi_codec_params
*params)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int i = 0;
DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__,
@@ -2869,7 +2871,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
int ret;
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
@@ -2933,7 +2935,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
struct drm_atomic_state *state = old_state->base.state;
struct hdmi_avi_infoframe frame;
struct drm_crtc_state *crtc_state;
@@ -2989,7 +2991,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "start");
@@ -3004,7 +3006,7 @@ static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "start");
@@ -3015,7 +3017,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
DRM_DEV_DEBUG_DRIVER(dev, "start");
@@ -3034,7 +3036,7 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
if (!it6505->cached_edid) {
it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
@@ -3086,7 +3088,7 @@ static const struct dev_pm_ops it6505_bridge_pm_ops = {
static int it6505_init_pdata(struct it6505 *it6505)
{
struct it6505_platform_data *pdata = &it6505->pdata;
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
/* 1.0V digital core power regulator */
pdata->pwr18 = devm_regulator_get(dev, "pwr18");
@@ -3128,7 +3130,7 @@ static int it6505_get_data_lanes_count(const struct device_node *endpoint,
static void it6505_parse_dt(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
struct device_node *np = dev->of_node, *ep = NULL;
int len;
u64 link_frequencies;
@@ -3333,7 +3335,7 @@ static void debugfs_create_files(struct it6505 *it6505)
static void debugfs_init(struct it6505 *it6505)
{
- struct device *dev = &it6505->client->dev;
+ struct device *dev = it6505->dev;
it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
@@ -3375,7 +3377,7 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505->bridge.of_node = client->dev.of_node;
it6505->connector_status = connector_status_disconnected;
- it6505->client = client;
+ it6505->dev = &client->dev;
i2c_set_clientdata(client, it6505);
/* get extcon device from DTS */
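
The interrupt-handler change above drops pm_runtime_get_if_in_use() in favour of an explicit powered check plus pm_runtime_get_sync() before the status registers are read. A rough sketch of that shape, assuming a driver data struct with dev and powered members and that the reference is dropped at the end of the handler:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct my_bridge {				/* hypothetical driver data */
	struct device *dev;
	bool powered;
};

static irqreturn_t my_threaded_handler(int irq, void *data)
{
	struct my_bridge *ctx = data;

	/* nothing to do while the chip is powered down */
	if (!ctx->powered)
		return IRQ_HANDLED;

	pm_runtime_get_sync(ctx->dev);		/* keep the device awake */

	/* ... read INT_STATUS registers and dispatch sub-handlers ... */

	pm_runtime_put_sync(ctx->dev);
	return IRQ_HANDLED;
}
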
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index aa8d47e7f40d..4d404f5ef87e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -16,7 +16,6 @@
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 5163e5224aad..9663601ce098 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -774,9 +774,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA |
- MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
- MIPI_DSI_MODE_NO_EOT_PACKET;
+ MIPI_DSI_MODE_VIDEO_HSE;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 2a57e804ea02..22c84d29c2bc 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -28,6 +28,8 @@
#define EDID_BLOCK_SIZE 128
#define EDID_NUM_BLOCKS 2
+#define FW_FILE "lt9611uxc_fw.bin"
+
struct lt9611uxc {
struct device *dev;
struct drm_bridge bridge;
@@ -754,7 +756,7 @@ static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc)
REG_SEQ0(0x805a, 0x00),
};
- ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev);
+ ret = request_firmware(&fw, FW_FILE, lt9611uxc->dev);
if (ret < 0)
return ret;
@@ -1019,3 +1021,5 @@ module_i2c_driver(lt9611uxc_driver);
MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>");
MODULE_LICENSE("GPL v2");
+
+MODULE_FIRMWARE(FW_FILE);
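
Defining the firmware name once and feeding it both to request_firmware() and MODULE_FIRMWARE(), as the lt9611uxc change does, lets initramfs tooling discover the blob from the module metadata. A short sketch with a hypothetical firmware path:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

#define MY_FW_FILE "vendor/my_bridge_fw.bin"	/* hypothetical blob name */

static int my_fw_update(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, MY_FW_FILE, dev);
	if (ret < 0)
		return ret;

	/* ... stream fw->data / fw->size into the controller ... */

	release_firmware(fw);
	return 0;
}

MODULE_FIRMWARE(MY_FW_FILE);
MODULE_LICENSE("GPL");
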
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 67368f23d4aa..8c5668dca0c4 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -7,7 +7,6 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 4a5f5c4f5dcc..8d54091ec66e 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -16,8 +16,8 @@
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index c9b6cb7678e3..ae3ab9262ef1 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -12,7 +12,6 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 8801cdd033b5..8161b1a1a4b1 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -105,7 +105,6 @@ struct ps8640 {
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
struct device_link *link;
- struct edid *edid;
bool pre_enabled;
bool need_post_hpd_delay;
};
@@ -155,23 +154,6 @@ static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
return container_of(aux, struct ps8640, aux);
}
-static bool ps8640_of_panel_on_aux_bus(struct device *dev)
-{
- struct device_node *bus, *panel;
-
- bus = of_get_child_by_name(dev->of_node, "aux-bus");
- if (!bus)
- return false;
-
- panel = of_get_child_by_name(bus, "panel");
- of_node_put(bus);
- if (!panel)
- return false;
- of_node_put(panel);
-
- return true;
-}
-
static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wait_us)
{
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -539,50 +521,6 @@ static void ps8640_bridge_detach(struct drm_bridge *bridge)
device_link_del(ps_bridge->link);
}
-static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector)
-{
- struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
- struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
- bool poweroff = !ps_bridge->pre_enabled;
-
- if (!ps_bridge->edid) {
- /*
- * When we end calling get_edid() triggered by an ioctl, i.e
- *
- * drm_mode_getconnector (ioctl)
- * -> drm_helper_probe_single_connector_modes
- * -> drm_bridge_connector_get_modes
- * -> ps8640_bridge_get_edid
- *
- * We need to make sure that what we need is enabled before
- * reading EDID, for this chip, we need to do a full poweron,
- * otherwise it will fail.
- */
- if (poweroff)
- drm_atomic_bridge_chain_pre_enable(bridge,
- connector->state->state);
-
- ps_bridge->edid = drm_get_edid(connector,
- ps_bridge->page[PAGE0_DP_CNTL]->adapter);
-
- /*
- * If we call the get_edid() function without having enabled the
- * chip before, return the chip to its original power state.
- */
- if (poweroff)
- drm_atomic_bridge_chain_post_disable(bridge,
- connector->state->state);
- }
-
- if (!ps_bridge->edid) {
- dev_err(dev, "Failed to get EDID\n");
- return NULL;
- }
-
- return drm_edid_duplicate(ps_bridge->edid);
-}
-
static void ps8640_runtime_disable(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -592,7 +530,6 @@ static void ps8640_runtime_disable(void *data)
static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
.detach = ps8640_bridge_detach,
- .get_edid = ps8640_bridge_get_edid,
.atomic_post_disable = ps8640_atomic_post_disable,
.atomic_pre_enable = ps8640_atomic_pre_enable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -706,14 +643,6 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
/*
- * In the device tree, if panel is listed under aux-bus of the bridge
- * node, panel driver should be able to retrieve EDID by itself using
- * aux-bus. So let's not set DRM_BRIDGE_OP_EDID here.
- */
- if (!ps8640_of_panel_on_aux_bus(&client->dev))
- ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
-
- /*
* Get MIPI DSI resources early. These can return -EPROBE_DEFER so
* we want to get them out of the way sooner.
*/
@@ -777,13 +706,6 @@ static int ps8640_probe(struct i2c_client *client)
return ret;
}
-static void ps8640_remove(struct i2c_client *client)
-{
- struct ps8640 *ps_bridge = i2c_get_clientdata(client);
-
- kfree(ps_bridge->edid);
-}
-
static const struct of_device_id ps8640_match[] = {
{ .compatible = "parade,ps8640" },
{ }
@@ -792,7 +714,6 @@ MODULE_DEVICE_TABLE(of, ps8640_match);
static struct i2c_driver ps8640_driver = {
.probe = ps8640_probe,
- .remove = ps8640_remove,
.driver = {
.name = "ps8640",
.of_match_table = ps8640_match,
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 043b8109e64a..c49091691ab1 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -16,8 +16,9 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <video/mipi_display.h>
@@ -1009,7 +1010,7 @@ static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
do {
u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
- if (!(reg & DSIM_SFR_HEADER_FULL))
+ if (reg & DSIM_SFR_HEADER_EMPTY)
return 0;
if (!cond_resched())
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index aac239729a1d..2bdc5b439beb 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -473,6 +473,41 @@ static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge,
return sii902x_get_edid(sii902x, connector);
}
+static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int sii902x_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ /*
+ * Bus-flag negotiation might be supported in the future, but
+ * for now set the bus flags statically in atomic_check.
+ */
+ bridge_state->input_bus_cfg.flags = bridge->timings->input_bus_flags;
+
+ return 0;
+}
+
static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.attach = sii902x_bridge_attach,
.mode_set = sii902x_bridge_mode_set,
@@ -480,6 +515,11 @@ static const struct drm_bridge_funcs sii902x_bridge_funcs = {
.enable = sii902x_bridge_enable,
.detect = sii902x_bridge_detect,
.get_edid = sii902x_bridge_get_edid,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = sii902x_bridge_atomic_get_input_bus_fmts,
+ .atomic_check = sii902x_bridge_atomic_check,
};
static int sii902x_mute(struct sii902x *sii902x, bool mute)
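
sii902x only accepts RGB888 on its input, so its new atomic_get_input_bus_fmts reports exactly one format; the generic atomic state helpers are wired up alongside it because the atomic hooks need bridge state to exist. A hedged sketch of the same arrangement for a hypothetical bridge:

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <linux/media-bus-format.h>
#include <linux/slab.h>

static u32 *my_get_input_bus_fmts(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state,
				  u32 output_fmt,
				  unsigned int *num_input_fmts)
{
	u32 *fmts;

	*num_input_fmts = 0;

	fmts = kcalloc(1, sizeof(*fmts), GFP_KERNEL);
	if (!fmts)
		return NULL;

	fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;	/* the only supported input */
	*num_input_fmts = 1;

	return fmts;
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_get_input_bus_fmts = my_get_input_bus_fmts,
};
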
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 79b09ccd1353..599164e3877d 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2376,7 +2376,7 @@ MODULE_DEVICE_TABLE(i2c, sii8620_id);
static struct i2c_driver sii8620_driver = {
.driver = {
.name = "sii8620",
- .of_match_table = of_match_ptr(sii8620_dt_match),
+ .of_match_table = sii8620_dt_match,
},
.probe = sii8620_probe,
.remove = sii8620_remove,
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index d85d9ee463b8..cbe8e778d7c7 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -8,8 +8,9 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 9389ce526eb1..be21c11de1f2 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -62,6 +62,10 @@ struct dw_hdmi_cec {
bool rx_done;
struct cec_notifier *notify;
int irq;
+
+ u8 regs_polarity;
+ u8 regs_mask;
+ u8 regs_mute_stat0;
};
static void dw_hdmi_write(struct dw_hdmi_cec *cec, u8 val, int offset)
@@ -304,11 +308,44 @@ static void dw_hdmi_cec_remove(struct platform_device *pdev)
cec_unregister_adapter(cec->adap);
}
+static int __maybe_unused dw_hdmi_cec_resume(struct device *dev)
+{
+ struct dw_hdmi_cec *cec = dev_get_drvdata(dev);
+
+ /* Restore logical address */
+ dw_hdmi_write(cec, cec->addresses & 255, HDMI_CEC_ADDR_L);
+ dw_hdmi_write(cec, cec->addresses >> 8, HDMI_CEC_ADDR_H);
+
+ /* Restore interrupt status/mask registers */
+ dw_hdmi_write(cec, cec->regs_polarity, HDMI_CEC_POLARITY);
+ dw_hdmi_write(cec, cec->regs_mask, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, cec->regs_mute_stat0, HDMI_IH_MUTE_CEC_STAT0);
+
+ return 0;
+}
+
+static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev)
+{
+ struct dw_hdmi_cec *cec = dev_get_drvdata(dev);
+
+ /* store interrupt status/mask registers */
+ cec->regs_polarity = dw_hdmi_read(cec, HDMI_CEC_POLARITY);
+ cec->regs_mask = dw_hdmi_read(cec, HDMI_CEC_MASK);
+ cec->regs_mute_stat0 = dw_hdmi_read(cec, HDMI_IH_MUTE_CEC_STAT0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_hdmi_cec_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume)
+};
+
static struct platform_driver dw_hdmi_cec_driver = {
.probe = dw_hdmi_cec_probe,
.remove_new = dw_hdmi_cec_remove,
.driver = {
.name = "dw-hdmi-cec",
+ .pm = &dw_hdmi_cec_pm,
},
};
module_platform_driver(dw_hdmi_cec_driver);
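
The dw-hdmi-cec suspend/resume hooks above snapshot the interrupt polarity/mask registers and the logical address, then write them back on resume via SET_SYSTEM_SLEEP_PM_OPS. A reduced sketch of the same idea with a hypothetical register map:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct my_cec {
	void __iomem *regs;
	u8 saved_mask;				/* hypothetical saved state */
};

#define MY_CEC_MASK	0x04			/* hypothetical offset */

static int __maybe_unused my_cec_suspend(struct device *dev)
{
	struct my_cec *cec = dev_get_drvdata(dev);

	cec->saved_mask = readb(cec->regs + MY_CEC_MASK);
	return 0;
}

static int __maybe_unused my_cec_resume(struct device *dev)
{
	struct my_cec *cec = dev_get_drvdata(dev);

	writeb(cec->saved_mask, cec->regs + MY_CEC_MASK);
	return 0;
}

static const struct dev_pm_ops my_cec_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(my_cec_suspend, my_cec_resume)
};
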
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 9d6dcaf317a1..6c1d79474505 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -14,7 +14,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
@@ -49,20 +49,6 @@
#define HDMI14_MAX_TMDSCLK 340000000
-enum hdmi_datamap {
- RGB444_8B = 0x01,
- RGB444_10B = 0x03,
- RGB444_12B = 0x05,
- RGB444_16B = 0x07,
- YCbCr444_8B = 0x09,
- YCbCr444_10B = 0x0B,
- YCbCr444_12B = 0x0D,
- YCbCr444_16B = 0x0F,
- YCbCr422_8B = 0x16,
- YCbCr422_10B = 0x14,
- YCbCr422_12B = 0x12,
-};
-
static const u16 csc_coeff_default[3][4] = {
{ 0x2000, 0x0000, 0x0000, 0x0000 },
{ 0x0000, 0x2000, 0x0000, 0x0000 },
@@ -856,10 +842,10 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
if (pdata->enable_audio)
pdata->enable_audio(hdmi,
- hdmi->channels,
- hdmi->sample_width,
- hdmi->sample_rate,
- hdmi->sample_non_pcm);
+ hdmi->channels,
+ hdmi->sample_width,
+ hdmi->sample_rate,
+ hdmi->sample_non_pcm);
}
static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
@@ -1426,9 +1412,9 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
/* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
if (dw_hdmi_support_scdc(hdmi, display)) {
if (mtmdsclock > HDMI14_MAX_TMDSCLK)
- drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 1);
+ drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1);
else
- drm_scdc_set_high_tmds_clock_ratio(&hdmi->connector, 0);
+ drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0);
}
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio);
@@ -2116,7 +2102,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
/* Enabled Scrambling in the Sink */
- drm_scdc_set_scrambling(&hdmi->connector, 1);
+ drm_scdc_set_scrambling(hdmi->curr_conn, 1);
/*
* To activate the scrambler feature, you must ensure
@@ -2132,7 +2118,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
HDMI_MC_SWRSTZ);
- drm_scdc_set_scrambling(&hdmi->connector, 0);
+ drm_scdc_set_scrambling(hdmi->curr_conn, 0);
}
}
@@ -2463,15 +2449,7 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
enum drm_connector_status result;
result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
-
- mutex_lock(&hdmi->mutex);
- if (result != hdmi->last_connector_result) {
- dev_dbg(hdmi->dev, "read_hpd result: %d", result);
- handle_plugged_change(hdmi,
- result == connector_status_connected);
- hdmi->last_connector_result = result;
- }
- mutex_unlock(&hdmi->mutex);
+ hdmi->last_connector_result = result;
return result;
}
@@ -2710,9 +2688,10 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
/* Default 8bit fallback */
output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
- *num_output_fmts = i;
-
- return output_fmts;
+ if (drm_mode_is_420_only(info, mode)) {
+ *num_output_fmts = i;
+ return output_fmts;
+ }
}
/*
@@ -2971,6 +2950,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
hdmi->curr_conn = NULL;
dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi);
+ handle_plugged_change(hdmi, false);
mutex_unlock(&hdmi->mutex);
}
@@ -2989,6 +2969,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
hdmi->curr_conn = connector;
dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi);
+ handle_plugged_change(hdmi, true);
mutex_unlock(&hdmi->mutex);
}
@@ -3346,6 +3327,12 @@ static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi)
return 0;
}
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi)
+{
+ return hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_bus_fmt_is_420);
+
struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
const struct dw_hdmi_plat_data *plat_data)
{
@@ -3553,6 +3540,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
+ hdmi->bridge.ddc = hdmi->ddc;
#ifdef CONFIG_OF
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
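
One of the dw-hdmi changes above stops appending RGB/4:4:4 formats when drm_mode_is_420_only() reports that the sink supports the mode only as YCbCr 4:2:0. A trimmed, hypothetical sketch of that early return in an atomic_get_output_bus_fmts callback:

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
#include <linux/media-bus-format.h>
#include <linux/slab.h>

static u32 *my_get_output_bus_fmts(struct drm_bridge *bridge,
				   struct drm_bridge_state *bridge_state,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state,
				   unsigned int *num_output_fmts)
{
	const struct drm_display_info *info = &conn_state->connector->display_info;
	const struct drm_display_mode *mode = &crtc_state->mode;
	unsigned int i = 0;
	u32 *fmts;

	*num_output_fmts = 0;

	fmts = kcalloc(2, sizeof(*fmts), GFP_KERNEL);
	if (!fmts)
		return NULL;

	fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;	/* YCbCr 4:2:0, 8 bit */

	if (drm_mode_is_420_only(info, mode)) {
		/* 4:2:0 is the only legal encoding for this mode: stop here */
		*num_output_fmts = i;
		return fmts;
	}

	fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;		/* plain RGB fallback */
	*num_output_fmts = i;
	return fmts;
}
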
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b2efecf7d160..04d4a1a10698 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -13,7 +13,7 @@
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -265,6 +265,7 @@ struct dw_mipi_dsi {
struct dw_mipi_dsi *master; /* dual-dsi master ptr */
struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
+ struct drm_display_mode mode;
const struct dw_mipi_dsi_plat_data *plat_data;
};
@@ -332,6 +333,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
if (IS_ERR(bridge))
return PTR_ERR(bridge);
+ bridge->pre_enable_prev_first = true;
dsi->panel_bridge = bridge;
drm_bridge_add(&dsi->bridge);
@@ -859,15 +861,6 @@ static void dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge,
*/
dw_mipi_dsi_set_mode(dsi, 0);
- /*
- * TODO Only way found to call panel-bridge post_disable &
- * panel unprepare before the dsi "final" disable...
- * This needs to be fixed in the drm_bridge framework and the API
- * needs to be updated to manage our own call chains...
- */
- if (dsi->panel_bridge->funcs->post_disable)
- dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
-
if (phy_ops->power_off)
phy_ops->power_off(dsi->plat_data->priv_data);
@@ -942,15 +935,25 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
phy_ops->power_on(dsi->plat_data->priv_data);
}
+static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ /* Power up the dsi ctl into a command mode */
+ dw_mipi_dsi_mode_set(dsi, &dsi->mode);
+ if (dsi->slave)
+ dw_mipi_dsi_mode_set(dsi->slave, &dsi->mode);
+}
+
static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
- dw_mipi_dsi_mode_set(dsi, adjusted_mode);
- if (dsi->slave)
- dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
+ /* Store the display mode for later use in pre_enable callback */
+ drm_mode_copy(&dsi->mode, adjusted_mode);
}
static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
@@ -1004,6 +1007,7 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = dw_mipi_dsi_bridge_atomic_pre_enable,
.atomic_enable = dw_mipi_dsi_bridge_atomic_enable,
.atomic_post_disable = dw_mipi_dsi_bridge_post_atomic_disable,
.mode_set = dw_mipi_dsi_bridge_mode_set,
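
dw-mipi-dsi now only caches the adjusted mode in .mode_set and programs the controller from atomic_pre_enable, combined with pre_enable_prev_first on the panel bridge so the host is already in command mode when the panel's pre_enable runs. A minimal sketch of that split, with a hypothetical my_dsi_hw_setup() helper standing in for the real mode-set code:

#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <linux/container_of.h>

struct my_dsi {					/* hypothetical host data */
	struct drm_bridge bridge;
	struct drm_display_mode mode;
};

#define bridge_to_my_dsi(b)	container_of(b, struct my_dsi, bridge)

static void my_dsi_hw_setup(struct my_dsi *dsi,
			    const struct drm_display_mode *mode)
{
	/* hypothetical: clocks, PHY and host registers programmed here */
}

static void my_dsi_mode_set(struct drm_bridge *bridge,
			    const struct drm_display_mode *mode,
			    const struct drm_display_mode *adjusted_mode)
{
	struct my_dsi *dsi = bridge_to_my_dsi(bridge);

	/* only remember the mode; the hardware may still be powered down */
	drm_mode_copy(&dsi->mode, adjusted_mode);
}

static void my_dsi_atomic_pre_enable(struct drm_bridge *bridge,
				     struct drm_bridge_state *old_state)
{
	struct my_dsi *dsi = bridge_to_my_dsi(bridge);

	/* power up and configure the host from the cached mode */
	my_dsi_hw_setup(dsi, &dsi->mode);
}
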
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 5641395fd310..46198af9eebb 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -41,8 +41,17 @@
#define DSI_LANEENABLE 0x0210 /* Enables each lane */
#define DSI_RX_START 1
-/* LCDC/DPI Host Registers */
-#define LCDCTRL 0x0420
+/* LCDC/DPI Host Registers, based on guesswork that this matches TC358764 */
+#define LCDCTRL 0x0420 /* Video Path Control */
+#define LCDCTRL_MSF BIT(0) /* Magic square in RGB666 */
+#define LCDCTRL_VTGEN BIT(4) /* Use chip clock for timing */
+#define LCDCTRL_UNK6 BIT(6) /* Unknown */
+#define LCDCTRL_EVTMODE BIT(5) /* Event mode */
+#define LCDCTRL_RGB888 BIT(8) /* RGB888 mode */
+#define LCDCTRL_HSPOL BIT(17) /* Polarity of HSYNC signal */
+#define LCDCTRL_DEPOL BIT(18) /* Polarity of DE signal */
+#define LCDCTRL_VSPOL BIT(19) /* Polarity of VSYNC signal */
+#define LCDCTRL_VSDELAY(v) (((v) & 0xfff) << 20) /* VSYNC delay */
/* SPI Master Registers */
#define SPICMR 0x0450
@@ -65,6 +74,7 @@ struct tc358762 {
struct regulator *regulator;
struct drm_bridge *panel_bridge;
struct gpio_desc *reset_gpio;
+ struct drm_display_mode mode;
bool pre_enabled;
int error;
};
@@ -105,6 +115,8 @@ static inline struct tc358762 *bridge_to_tc358762(struct drm_bridge *bridge)
static int tc358762_init(struct tc358762 *ctx)
{
+ u32 lcdctrl;
+
tc358762_write(ctx, DSI_LANEENABLE,
LANEENABLE_L0EN | LANEENABLE_CLEN);
tc358762_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5);
@@ -114,7 +126,18 @@ static int tc358762_init(struct tc358762 *ctx)
tc358762_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD);
tc358762_write(ctx, SPICMR, 0x00);
- tc358762_write(ctx, LCDCTRL, 0x00100150);
+
+ lcdctrl = LCDCTRL_VSDELAY(1) | LCDCTRL_RGB888 |
+ LCDCTRL_UNK6 | LCDCTRL_VTGEN;
+
+ if (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC)
+ lcdctrl |= LCDCTRL_HSPOL;
+
+ if (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC)
+ lcdctrl |= LCDCTRL_VSPOL;
+
+ tc358762_write(ctx, LCDCTRL, lcdctrl);
+
tc358762_write(ctx, SYSCTRL, 0x040f);
msleep(100);
@@ -126,7 +149,7 @@ static int tc358762_init(struct tc358762 *ctx)
return tc358762_clear_error(ctx);
}
-static void tc358762_post_disable(struct drm_bridge *bridge)
+static void tc358762_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -148,7 +171,7 @@ static void tc358762_post_disable(struct drm_bridge *bridge)
dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
}
-static void tc358762_pre_enable(struct drm_bridge *bridge)
+static void tc358762_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
int ret;
@@ -162,11 +185,17 @@ static void tc358762_pre_enable(struct drm_bridge *bridge)
usleep_range(5000, 10000);
}
+ ctx->pre_enabled = true;
+}
+
+static void tc358762_enable(struct drm_bridge *bridge, struct drm_bridge_state *state)
+{
+ struct tc358762 *ctx = bridge_to_tc358762(bridge);
+ int ret;
+
ret = tc358762_init(ctx);
if (ret < 0)
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
-
- ctx->pre_enabled = true;
}
static int tc358762_attach(struct drm_bridge *bridge,
@@ -178,10 +207,24 @@ static int tc358762_attach(struct drm_bridge *bridge,
bridge, flags);
}
+static void tc358762_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj)
+{
+ struct tc358762 *ctx = bridge_to_tc358762(bridge);
+
+ drm_mode_copy(&ctx->mode, mode);
+}
+
static const struct drm_bridge_funcs tc358762_bridge_funcs = {
- .post_disable = tc358762_post_disable,
- .pre_enable = tc358762_pre_enable,
+ .atomic_post_disable = tc358762_post_disable,
+ .atomic_pre_enable = tc358762_pre_enable,
+ .atomic_enable = tc358762_enable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.attach = tc358762_attach,
+ .mode_set = tc358762_bridge_mode_set,
};
static int tc358762_parse_dt(struct tc358762 *ctx)
@@ -231,7 +274,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 1;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO_HSE;
ret = tc358762_parse_dt(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index f85654f1b104..deccb3995022 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -42,10 +42,10 @@
/* Video path registers */
#define VP_CTRL 0x0450 /* Video Path Control */
-#define VP_CTRL_MSF(v) FLD_VAL(v, 0, 0) /* Magic square in RGB666 */
-#define VP_CTRL_VTGEN(v) FLD_VAL(v, 4, 4) /* Use chip clock for timing */
-#define VP_CTRL_EVTMODE(v) FLD_VAL(v, 5, 5) /* Event mode */
-#define VP_CTRL_RGB888(v) FLD_VAL(v, 8, 8) /* RGB888 mode */
+#define VP_CTRL_MSF BIT(0) /* Magic square in RGB666 */
+#define VP_CTRL_VTGEN BIT(4) /* Use chip clock for timing */
+#define VP_CTRL_EVTMODE BIT(5) /* Event mode */
+#define VP_CTRL_RGB888 BIT(8) /* RGB888 mode */
#define VP_CTRL_VSDELAY(v) FLD_VAL(v, 31, 20) /* VSYNC delay */
#define VP_CTRL_HSPOL BIT(17) /* Polarity of HSYNC signal */
#define VP_CTRL_DEPOL BIT(18) /* Polarity of DE signal */
@@ -176,7 +176,7 @@ static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
if (ret >= 0)
le32_to_cpus(val);
- dev_dbg(ctx->dev, "read: %d, addr: %d\n", addr, *val);
+ dev_dbg(ctx->dev, "read: addr=0x%04x data=0x%08x\n", addr, *val);
}
static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
@@ -233,8 +233,8 @@ static int tc358764_init(struct tc358764 *ctx)
tc358764_write(ctx, DSI_STARTDSI, DSI_RX_START);
/* configure video path */
- tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888(1) |
- VP_CTRL_EVTMODE(1) | VP_CTRL_HSPOL | VP_CTRL_VSPOL);
+ tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888 |
+ VP_CTRL_EVTMODE | VP_CTRL_HSPOL | VP_CTRL_VSPOL);
/* reset PHY */
tc358764_write(ctx, LV_PHY0, LV_PHY0_RST(1) |
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 65dc842e31f0..b45bffab7c81 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -500,8 +500,8 @@ static int tc_pllupdate(struct tc_data *tc, unsigned int pllctrl)
if (ret)
return ret;
- /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
- usleep_range(3000, 6000);
+ /* Wait for PLL to lock: up to 7.5 ms, depending on refclk */
+ usleep_range(15000, 20000);
return 0;
}
@@ -817,7 +817,7 @@ static int tc_set_common_video_mode(struct tc_data *tc,
* sync signals
*/
ret = regmap_write(tc->regmap, VPCTRL0,
- FIELD_PREP(VSDELAY, 0) |
+ FIELD_PREP(VSDELAY, right_margin + 10) |
OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
if (ret)
return ret;
@@ -2215,13 +2215,6 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
-static void tc_clk_disable(void *data)
-{
- struct clk *refclk = data;
-
- clk_disable_unprepare(refclk);
-}
-
static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -2238,20 +2231,10 @@ static int tc_probe(struct i2c_client *client)
if (ret)
return ret;
- tc->refclk = devm_clk_get(dev, "ref");
- if (IS_ERR(tc->refclk)) {
- ret = PTR_ERR(tc->refclk);
- dev_err(dev, "Failed to get refclk: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(tc->refclk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, tc_clk_disable, tc->refclk);
- if (ret)
- return ret;
+ tc->refclk = devm_clk_get_enabled(dev, "ref");
+ if (IS_ERR(tc->refclk))
+ return dev_err_probe(dev, PTR_ERR(tc->refclk),
+ "Failed to get and enable the ref clk\n");
/* tRSTW = 100 cycles , at 13 MHz that is ~7.69 us */
usleep_range(10, 15);
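
The tc358767 hunk above replaces the open-coded clk_get/clk_prepare_enable/devm_add_action_or_reset sequence with devm_clk_get_enabled(), which acquires, prepares and enables the clock and undoes all of that automatically when the device is unbound. A minimal probe-fragment sketch; the helper function name is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical probe fragment: devm_clk_get_enabled() gets the clock,
 * prepares and enables it, and arranges for disable/unprepare and put
 * to run automatically on unbind, so no explicit cleanup action is
 * needed.
 */
static int demo_probe_refclk(struct device *dev, struct clk **out)
{
        struct clk *refclk;

        refclk = devm_clk_get_enabled(dev, "ref");
        if (IS_ERR(refclk))
                return dev_err_probe(dev, PTR_ERR(refclk),
                                     "Failed to get and enable the ref clk\n");

        *out = refclk;
        return 0;
}
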
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 7e9f4ec8e780..061e8bd5915d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -31,7 +31,7 @@
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 597ceb7024e0..f448b903e190 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -170,10 +170,10 @@
* @pwm_refclk_freq: Cache for the reference clock input to the PWM.
*/
struct ti_sn65dsi86 {
- struct auxiliary_device bridge_aux;
- struct auxiliary_device gpio_aux;
- struct auxiliary_device aux_aux;
- struct auxiliary_device pwm_aux;
+ struct auxiliary_device *bridge_aux;
+ struct auxiliary_device *gpio_aux;
+ struct auxiliary_device *aux_aux;
+ struct auxiliary_device *pwm_aux;
struct device *dev;
struct regmap *regmap;
@@ -298,6 +298,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
if (refclk_lut[i] == refclk_rate)
break;
+ /* Avoid buffer overflow; "1" is the default rate per the datasheet. */
+ if (i >= refclk_lut_size)
+ i = 1;
+
regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
REFCLK_FREQ(i));
@@ -464,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
auxiliary_device_delete(data);
}
-/*
- * AUX bus docs say that a non-NULL release is mandatory, but it makes no
- * sense for the model used here where all of the aux devices are allocated
- * in the single shared structure. We'll use this noop as a workaround.
- */
-static void ti_sn65dsi86_noop(struct device *dev) {}
+static void ti_sn65dsi86_aux_device_release(struct device *dev)
+{
+ struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+
+ kfree(aux);
+}
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
- struct auxiliary_device *aux,
+ struct auxiliary_device **aux_out,
const char *name)
{
struct device *dev = pdata->dev;
+ struct auxiliary_device *aux;
int ret;
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return -ENOMEM;
+
aux->name = name;
aux->dev.parent = dev;
- aux->dev.release = ti_sn65dsi86_noop;
+ aux->dev.release = ti_sn65dsi86_aux_device_release;
device_set_of_node_from_dev(&aux->dev, dev);
ret = auxiliary_device_init(aux);
- if (ret)
+ if (ret) {
+ kfree(aux);
return ret;
+ }
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
if (ret)
return ret;
@@ -493,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
+ if (!ret)
+ *aux_out = aux;
return ret;
}
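
The ti-sn65dsi86 change above moves each auxiliary device to its own kzalloc'd allocation with a release callback that frees it, because the device's refcount can outlive the parent's devm-managed structure, which a no-op release cannot guarantee. A hedged sketch of the general pattern (function and variable names are illustrative):

#include <linux/auxiliary_bus.h>
#include <linux/err.h>
#include <linux/slab.h>

static void demo_aux_release(struct device *dev)
{
        /* frees the allocation once the last reference is dropped */
        kfree(container_of(dev, struct auxiliary_device, dev));
}

static struct auxiliary_device *demo_aux_create(struct device *parent,
                                                const char *name)
{
        struct auxiliary_device *aux;
        int ret;

        aux = kzalloc(sizeof(*aux), GFP_KERNEL);
        if (!aux)
                return ERR_PTR(-ENOMEM);

        aux->name = name;
        aux->dev.parent = parent;
        aux->dev.release = demo_aux_release;

        ret = auxiliary_device_init(aux);
        if (ret) {
                kfree(aux);
                return ERR_PTR(ret);
        }

        ret = auxiliary_device_add(aux);
        if (ret) {
                /* init succeeded, so uninit drops the ref and releases */
                auxiliary_device_uninit(aux);
                return ERR_PTR(ret);
        }

        return aux;
}
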
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index c06390da9ffd..28848a8eb42e 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -206,12 +206,55 @@ static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
+static u32 *tfp410_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = 1;
+ input_fmts[0] = dvi->bus_format;
+
+ return input_fmts;
+}
+
+static int tfp410_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ /*
+ * Bus flags negotiation might be supported in the future.
+ * For now, set the bus flags statically in atomic_check.
+ */
+ bridge_state->input_bus_cfg.flags = dvi->timings.input_bus_flags;
+
+ return 0;
+}
+
static const struct drm_bridge_funcs tfp410_bridge_funcs = {
.attach = tfp410_attach,
.detach = tfp410_detach,
.enable = tfp410_enable,
.disable = tfp410_disable,
.mode_valid = tfp410_mode_valid,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = tfp410_get_input_bus_fmts,
+ .atomic_check = tfp410_atomic_check,
};
static const struct drm_bridge_timings tfp410_default_timings = {
@@ -405,7 +448,7 @@ MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
static struct i2c_driver tfp410_i2c_driver = {
.driver = {
.name = "tfp410",
- .of_match_table = of_match_ptr(tfp410_match),
+ .of_match_table = tfp410_match,
},
.id_table = tfp410_i2c_ids,
.probe = tfp410_i2c_probe,
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 16565a0a5da6..e6a78fd32380 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2103,7 +2103,7 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
- strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
sizeof(aux->ddc.name));
ret = drm_dp_aux_register_devnode(aux);
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index be71be95b706..ed96cfcfa304 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3404,7 +3404,7 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
/* Skip failed payloads */
if (payload->vc_start_slot == -1) {
- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
payload->port->connector->name);
return -EIO;
}
@@ -4053,17 +4053,28 @@ out:
}
/**
- * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
+ * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
* @handled: whether the hpd interrupt was consumed or not
*
- * This should be called from the driver when it detects a short IRQ,
+ * This should be called from the driver when it detects an HPD IRQ,
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
- * topology manager will process the sideband messages received as a result
- * of this.
+ * topology manager will process the sideband messages received
+ * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
+ * corresponding flags in @ack that the driver uses to ack the DP receiver later.
+ *
+ * Note that the driver shall also call
+ * drm_dp_mst_hpd_irq_send_new_request() if 'handled' is set
+ * after calling this function, to try to kick off a new request in
+ * the queue if the previous message transaction is completed.
+ *
+ * See also:
+ * drm_dp_mst_hpd_irq_send_new_request()
*/
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
+ u8 *ack, bool *handled)
{
int ret = 0;
int sc;
@@ -4078,19 +4089,48 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
+ ack[1] |= DP_DOWN_REP_MSG_RDY;
}
if (esi[1] & DP_UP_REQ_MSG_RDY) {
ret |= drm_dp_mst_handle_up_req(mgr);
*handled = true;
+ ack[1] |= DP_UP_REQ_MSG_RDY;
}
- drm_dp_mst_kick_tx(mgr);
return ret;
}
-EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
/**
+ * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
+ * @mgr: manager to notify irq for.
+ *
+ * This should be called from the driver when the MST IRQ event is
+ * handled and acked. Note that a new down request should only be sent
+ * when the previous message transaction is completed; the source is not
+ * supposed to generate interleaved message transactions.
+ */
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ bool kick = true;
+
+ mutex_lock(&mgr->qlock);
+ txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
+ /* If the last transaction is not completed yet */
+ if (!txmsg ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+ kick = false;
+ mutex_unlock(&mgr->qlock);
+
+ if (kick)
+ drm_dp_mst_kick_tx(mgr);
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
+/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
* @ctx: The acquisition context to use for grabbing locks
@@ -5733,7 +5773,7 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
aux->ddc.dev.parent = parent_dev;
aux->ddc.dev.of_node = parent_dev->of_node;
- strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
+ strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
sizeof(aux->ddc.name));
return i2c_add_adapter(&aux->ddc);
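
The MST change above splits drm_dp_mst_hpd_irq() into an event-handling half and an explicit kick of the next queued request, so the driver can ack the sink in between. A hedged sketch of a driver-side HPD service routine using the new pair; the DPCD ack write and the function name are illustrative, and real drivers also loop until no more events are pending:

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>

static void demo_mst_hpd_irq(struct drm_dp_aux *aux,
                             struct drm_dp_mst_topology_mgr *mgr)
{
        u8 esi[4] = {}, ack[4] = {};
        bool handled = false;

        if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
                return;

        /* handle the event; error/reprobe handling is omitted here */
        drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
        if (!handled)
                return;

        /* ack only the serviced event bits back to the sink */
        drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &ack[1], 3);

        /* then try to send the next queued sideband request */
        drm_dp_mst_hpd_irq_send_new_request(mgr);
}
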
diff --git a/drivers/gpu/drm/display/drm_hdcp_helper.c b/drivers/gpu/drm/display/drm_hdcp_helper.c
index e78999c72bd7..a3f0e6d96105 100644
--- a/drivers/gpu/drm/display/drm_hdcp_helper.c
+++ b/drivers/gpu/drm/display/drm_hdcp_helper.c
@@ -415,7 +415,7 @@ void drm_hdcp_update_content_protection(struct drm_connector *connector,
return;
state->content_protection = val;
- drm_sysfs_connector_status_event(connector,
- dev->mode_config.content_protection_property);
+ drm_sysfs_connector_property_event(connector,
+ dev->mode_config.content_protection_property);
}
EXPORT_SYMBOL(drm_hdcp_update_content_protection);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index b4c6ffc438da..c277b198fa3f 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -140,6 +140,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
if (!state->planes)
goto fail;
+ /*
+ * Because drm_atomic_state can be committed asynchronously we need our
+ * own reference and cannot rely on the one implied by drm_file in the
+ * ioctl call.
+ */
+ drm_dev_get(dev);
state->dev = dev;
drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
@@ -299,7 +305,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
void __drm_atomic_state_free(struct kref *ref)
{
struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
- struct drm_mode_config *config = &state->dev->mode_config;
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config *config = &dev->mode_config;
drm_atomic_state_clear(state);
@@ -311,6 +318,8 @@ void __drm_atomic_state_free(struct kref *ref)
drm_atomic_state_default_release(state);
kfree(state);
}
+
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(__drm_atomic_state_free);
@@ -1131,6 +1140,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
+ drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace));
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
if (state->writeback_job && state->writeback_job->fb)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 41b8066f61ff..292e38eb6218 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3332,7 +3332,7 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* that also takes a snapshot of the modeset state to be restored on resume.
*
* This is just a convenience wrapper around drm_atomic_helper_disable_all(),
- * and it is the atomic version of drm_crtc_force_disable_all().
+ * and it is the atomic version of drm_helper_force_disable_all().
*/
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index d867e7f9f2cd..98d3b10c08ae 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -374,16 +374,25 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
if (blob_id != 0) {
new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL)
+ if (new_blob == NULL) {
+ drm_dbg_atomic(dev,
+ "cannot find blob ID %llu\n", blob_id);
return -EINVAL;
+ }
if (expected_size > 0 &&
new_blob->length != expected_size) {
+ drm_dbg_atomic(dev,
+ "[BLOB:%d] length %zu different from expected %zu\n",
+ new_blob->base.id, new_blob->length, expected_size);
drm_property_blob_put(new_blob);
return -EINVAL;
}
if (expected_elem_size > 0 &&
new_blob->length % expected_elem_size != 0) {
+ drm_dbg_atomic(dev,
+ "[BLOB:%d] length %zu not divisible by element size %zu\n",
+ new_blob->base.id, new_blob->length, expected_elem_size);
drm_property_blob_put(new_blob);
return -EINVAL;
}
@@ -454,7 +463,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
return crtc->funcs->atomic_set_property(crtc, state, property, val);
} else {
drm_dbg_atomic(crtc->dev,
- "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+ "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
crtc->base.id, crtc->name,
property->base.id, property->name);
return -EINVAL;
@@ -489,8 +498,13 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = state->scaling_filter;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
- else
+ else {
+ drm_dbg_atomic(dev,
+ "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
return -EINVAL;
+ }
return 0;
}
@@ -525,8 +539,12 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
- if (val && !crtc)
+ if (val && !crtc) {
+ drm_dbg_atomic(dev,
+ "[PROP:%d:%s] cannot find CRTC with ID %llu\n",
+ property->base.id, property->name, val);
return -EACCES;
+ }
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
@@ -577,7 +595,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
property, val);
} else {
drm_dbg_atomic(plane->dev,
- "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+ "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
plane->base.id, plane->name,
property->base.id, property->name);
return -EINVAL;
@@ -636,6 +654,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -677,14 +699,21 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
- if (val && !crtc)
+ if (val && !crtc) {
+ drm_dbg_atomic(dev,
+ "[PROP:%d:%s] cannot find CRTC with ID %llu\n",
+ property->base.id, property->name, val);
return -EACCES;
+ }
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
* is done in legacy setprop path for us. Disallow (for
* now?) atomic writes to DPMS property:
*/
+ drm_dbg_atomic(dev,
+ "legacy [PROP:%d:%s] can only be set via legacy uAPI\n",
+ property->base.id, property->name);
return -EINVAL;
} else if (property == config->tv_select_subconnector_property) {
state->tv.select_subconnector = val;
@@ -774,7 +803,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state, property, val);
} else {
drm_dbg_atomic(connector->dev,
- "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
+ "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
connector->base.id, connector->name,
property->base.id, property->name);
return -EINVAL;
@@ -856,6 +885,10 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
return connector->funcs->atomic_get_property(connector,
state, property, val);
} else {
+ drm_dbg_atomic(dev,
+ "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
return -EINVAL;
}
@@ -894,6 +927,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
break;
}
default:
+ drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
break;
}
@@ -1030,6 +1064,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
default:
+ drm_dbg_atomic(prop->dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
break;
}
@@ -1230,8 +1265,10 @@ static int prepare_signaling(struct drm_device *dev,
* Having this flag means user mode pends on event which will never
* reach due to lack of at least one CRTC for signaling
*/
- if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
+ drm_dbg_atomic(dev, "need at least one CRTC for DRM_MODE_PAGE_FLIP_EVENT");
return -EINVAL;
+ }
return 0;
}
@@ -1364,11 +1401,13 @@ retry:
obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj) {
+ drm_dbg_atomic(dev, "cannot find object ID %d", obj_id);
ret = -ENOENT;
goto out;
}
if (!obj->properties) {
+ drm_dbg_atomic(dev, "[OBJECT:%d] has no properties", obj_id);
drm_mode_object_put(obj);
ret = -ENOENT;
goto out;
@@ -1395,6 +1434,9 @@ retry:
prop = drm_mode_obj_find_prop_id(obj, prop_id);
if (!prop) {
+ drm_dbg_atomic(dev,
+ "[OBJECT:%d] cannot find property ID %d",
+ obj_id, prop_id);
drm_mode_object_put(obj);
ret = -ENOENT;
goto out;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c3d69af02e79..39e68e45bb12 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -27,8 +27,10 @@
#include <linux/mutex.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -1345,6 +1347,50 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+#ifdef CONFIG_DEBUG_FS
+static int drm_bridge_chains_info(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ unsigned int bridge_idx = 0;
+
+ list_for_each_entry(encoder, &config->encoder_list, head) {
+ struct drm_bridge *bridge;
+
+ drm_printf(&p, "encoder[%u]\n", encoder->base.id);
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
+ bridge_idx, bridge->type, bridge->ops);
+
+#ifdef CONFIG_OF
+ if (bridge->of_node)
+ drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
+#endif
+
+ drm_printf(&p, "\n");
+
+ bridge_idx++;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
+ { "bridge_chains", drm_bridge_chains_info, 0 },
+};
+
+void drm_bridge_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_add_files(minor->dev, drm_bridge_debugfs_list,
+ ARRAY_SIZE(drm_bridge_debugfs_list));
+}
+#endif
+
MODULE_AUTHOR("Ajay Kumar <[email protected]>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 19ae4a177ac3..1da93d5a1f61 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -125,7 +125,7 @@ static void drm_bridge_connector_hpd_cb(void *cb_data,
drm_bridge_connector_hpd_notify(connector, status);
- drm_kms_helper_hotplug_event(dev);
+ drm_kms_helper_connector_hotplug_event(connector);
}
static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
@@ -318,6 +318,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct i2c_adapter *ddc = NULL;
struct drm_bridge *bridge, *panel_bridge = NULL;
int connector_type;
+ int ret;
bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
if (!bridge_connector)
@@ -368,8 +369,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(-EINVAL);
}
- drm_connector_init_with_ddc(drm, connector, &drm_bridge_connector_funcs,
- connector_type, ddc);
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &drm_bridge_connector_funcs,
+ connector_type, ddc);
+ if (ret) {
+ kfree(bridge_connector);
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 7098f125b54a..e6f5ba5f4baf 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -781,15 +781,15 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
count++;
}
- drm_printf(p, "order-%d ", order);
+ drm_printf(p, "order-%2d ", order);
free = count * (mm->chunk_size << order);
if (free < SZ_1M)
- drm_printf(p, "free: %lluKiB", free >> 10);
+ drm_printf(p, "free: %8llu KiB", free >> 10);
else
- drm_printf(p, "free: %lluMiB", free >> 20);
+ drm_printf(p, "free: %8llu MiB", free >> 20);
- drm_printf(p, ", pages: %llu\n", count);
+ drm_printf(p, ", blocks: %llu\n", count);
}
}
EXPORT_SYMBOL(drm_buddy_print);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index f6292ba0e6fc..037e36f2049c 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
* drm_client_register() it is no longer permissible to call drm_client_release()
* directly (outside the unregister callback), instead cleanup will happen
* automatically on driver unload.
+ *
+ * Registering a client generates a hotplug event that allows the client
+ * to set up its display from pre-existing outputs. The client must have
+ * initialized its state to be able to handle the hotplug event successfully.
*/
void drm_client_register(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
+ int ret;
mutex_lock(&dev->clientlist_mutex);
list_add(&client->list, &dev->clientlist);
+
+ if (client->funcs && client->funcs->hotplug) {
+ /*
+ * Perform an initial hotplug event to pick up the
+ * display configuration for the client. This step
+ * has to be performed *after* registering the client
+ * in the list of clients, or a concurrent hotplug
+ * event might be lost, leaving the display off.
+ *
+ * Hold the clientlist_mutex as for a regular hotplug
+ * event.
+ */
+ ret = client->funcs->hotplug(client);
+ if (ret)
+ drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
+ }
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_register);
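
With the hunk above, drm_client_register() performs the initial hotplug itself, which is why the fbdev setup paths later in this diff drop their manual hotplug calls. A minimal sketch of a client that is fully initialized before registration (names are illustrative):

#include <drm/drm_client.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <linux/module.h>

/*
 * Illustrative client: its state must be ready before registration,
 * because drm_client_register() immediately invokes .hotplug.
 */
static int demo_client_hotplug(struct drm_client_dev *client)
{
        drm_dbg_kms(client->dev, "picking up initial display config\n");
        return 0;
}

static const struct drm_client_funcs demo_client_funcs = {
        .owner   = THIS_MODULE,
        .hotplug = demo_client_hotplug,
};

static int demo_client_setup(struct drm_device *dev,
                             struct drm_client_dev *client)
{
        int ret;

        ret = drm_client_init(dev, client, "demo", &demo_client_funcs);
        if (ret)
                return ret;

        /* no manual initial hotplug call is needed anymore */
        drm_client_register(client);
        return 0;
}
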
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 1b12a3c201a3..871e4e2129d6 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -311,6 +311,9 @@ static bool drm_client_target_cloned(struct drm_device *dev,
can_clone = true;
dmt_mode = drm_mode_find_dmt(dev, 1024, 768, 60, false);
+ if (!dmt_mode)
+ goto fail;
+
for (i = 0; i < connector_count; i++) {
if (!enabled[i])
continue;
@@ -326,11 +329,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
if (!modes[i])
can_clone = false;
}
+ kfree(dmt_mode);
if (can_clone) {
DRM_DEBUG_KMS("can clone using 1024x768\n");
return true;
}
+fail:
DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
return false;
}
@@ -862,6 +867,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
break;
}
+ kfree(modeset->mode);
modeset->mode = drm_mode_duplicate(dev, mode);
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 48df7a5ea503..bf8371dc2a61 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1055,64 +1055,85 @@ static const struct drm_prop_enum_list drm_dp_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_dp_subconnector_name,
drm_dp_subconnector_enum_list)
-static const struct drm_prop_enum_list hdmi_colorspaces[] = {
+
+static const char * const colorspace_names[] = {
/* For Default case, driver will set the colorspace */
- { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ [DRM_MODE_COLORIMETRY_DEFAULT] = "Default",
/* Standard Definition Colorimetry based on CEA 861 */
- { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" },
- { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = "SMPTE_170M_YCC",
+ [DRM_MODE_COLORIMETRY_BT709_YCC] = "BT709_YCC",
/* Standard Definition Colorimetry based on IEC 61966-2-4 */
- { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ [DRM_MODE_COLORIMETRY_XVYCC_601] = "XVYCC_601",
/* High Definition Colorimetry based on IEC 61966-2-4 */
- { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ [DRM_MODE_COLORIMETRY_XVYCC_709] = "XVYCC_709",
/* Colorimetry based on IEC 61966-2-1/Amendment 1 */
- { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ [DRM_MODE_COLORIMETRY_SYCC_601] = "SYCC_601",
/* Colorimetry based on IEC 61966-2-5 [33] */
- { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ [DRM_MODE_COLORIMETRY_OPYCC_601] = "opYCC_601",
/* Colorimetry based on IEC 61966-2-5 */
- { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ [DRM_MODE_COLORIMETRY_OPRGB] = "opRGB",
/* Colorimetry based on ITU-R BT.2020 */
- { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ [DRM_MODE_COLORIMETRY_BT2020_CYCC] = "BT2020_CYCC",
/* Colorimetry based on ITU-R BT.2020 */
- { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ [DRM_MODE_COLORIMETRY_BT2020_RGB] = "BT2020_RGB",
/* Colorimetry based on ITU-R BT.2020 */
- { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+ [DRM_MODE_COLORIMETRY_BT2020_YCC] = "BT2020_YCC",
/* Added as part of Additional Colorimetry Extension in 861.G */
- { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
- { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
+ [DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65] = "DCI-P3_RGB_D65",
+ [DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER] = "DCI-P3_RGB_Theater",
+ [DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED] = "RGB_WIDE_FIXED",
+ /* Colorimetry based on scRGB (IEC 61966-2-2) */
+ [DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT] = "RGB_WIDE_FLOAT",
+ [DRM_MODE_COLORIMETRY_BT601_YCC] = "BT601_YCC",
};
+/**
+ * drm_get_colorspace_name - return a string for color encoding
+ * @colorspace: color space to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorspace_name(enum drm_colorspace colorspace)
+{
+ if (colorspace < ARRAY_SIZE(colorspace_names) && colorspace_names[colorspace])
+ return colorspace_names[colorspace];
+ else
+ return "(null)";
+}
+
+static const u32 hdmi_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_SMPTE_170M_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_XVYCC_601) |
+ BIT(DRM_MODE_COLORIMETRY_XVYCC_709) |
+ BIT(DRM_MODE_COLORIMETRY_SYCC_601) |
+ BIT(DRM_MODE_COLORIMETRY_OPYCC_601) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_CYCC) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65) |
+ BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER);
+
/*
* As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry
* Format Table 2-120
*/
-static const struct drm_prop_enum_list dp_colorspaces[] = {
- /* For Default case, driver will set the colorspace */
- { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
- { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" },
- /* Colorimetry based on scRGB (IEC 61966-2-2) */
- { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" },
- /* Colorimetry based on IEC 61966-2-5 */
- { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
- /* Colorimetry based on SMPTE RP 431-2 */
- { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
- /* Colorimetry based on ITU-R BT.2020 */
- { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
- { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" },
- { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
- /* Standard Definition Colorimetry based on IEC 61966-2-4 */
- { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
- /* High Definition Colorimetry based on IEC 61966-2-4 */
- { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
- /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
- { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
- /* Colorimetry based on IEC 61966-2-5 [33] */
- { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
- /* Colorimetry based on ITU-R BT.2020 */
- { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
- /* Colorimetry based on ITU-R BT.2020 */
- { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
-};
+static const u32 dp_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED) |
+ BIT(DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT601_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_XVYCC_601) |
+ BIT(DRM_MODE_COLORIMETRY_XVYCC_709) |
+ BIT(DRM_MODE_COLORIMETRY_SYCC_601) |
+ BIT(DRM_MODE_COLORIMETRY_OPYCC_601) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_CYCC) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
/**
* DOC: standard connector properties
@@ -2135,33 +2156,72 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* drm_mode_create_dp_colorspace_property() is used for DP connector.
*/
-/**
- * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
- * @connector: connector to create the Colorspace property on.
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * HDMI connectors.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector)
+static int drm_mode_create_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces)
{
struct drm_device *dev = connector->dev;
+ u32 colorspaces = supported_colorspaces | BIT(DRM_MODE_COLORIMETRY_DEFAULT);
+ struct drm_prop_enum_list enum_list[DRM_MODE_COLORIMETRY_COUNT];
+ int i, len;
if (connector->colorspace_property)
return 0;
+ if (!supported_colorspaces) {
+ drm_err(dev, "No supported colorspaces provded on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ if ((supported_colorspaces & -BIT(DRM_MODE_COLORIMETRY_COUNT)) != 0) {
+ drm_err(dev, "Unknown colorspace provded on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ len = 0;
+ for (i = 0; i < DRM_MODE_COLORIMETRY_COUNT; i++) {
+ if ((colorspaces & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = colorspace_names[i];
+ len++;
+ }
+
connector->colorspace_property =
drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
- hdmi_colorspaces,
- ARRAY_SIZE(hdmi_colorspaces));
+ enum_list,
+ len);
if (!connector->colorspace_property)
return -ENOMEM;
return 0;
}
+
+/**
+ * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * HDMI connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces)
+{
+ u32 colorspaces;
+
+ if (supported_colorspaces)
+ colorspaces = supported_colorspaces & hdmi_colorspaces;
+ else
+ colorspaces = hdmi_colorspaces;
+
+ return drm_mode_create_colorspace_property(connector, colorspaces);
+}
EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
/**
@@ -2174,22 +2234,17 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_mode_create_dp_colorspace_property(struct drm_connector *connector)
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces)
{
- struct drm_device *dev = connector->dev;
+ u32 colorspaces;
- if (connector->colorspace_property)
- return 0;
-
- connector->colorspace_property =
- drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
- dp_colorspaces,
- ARRAY_SIZE(dp_colorspaces));
-
- if (!connector->colorspace_property)
- return -ENOMEM;
+ if (supported_colorspaces)
+ colorspaces = supported_colorspaces & dp_colorspaces;
+ else
+ colorspaces = dp_colorspaces;
- return 0;
+ return drm_mode_create_colorspace_property(connector, colorspaces);
}
EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property);
@@ -2675,10 +2730,10 @@ static int drm_connector_privacy_screen_notifier(
drm_connector_update_privacy_screen_properties(connector, true);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
- drm_sysfs_connector_status_event(connector,
- connector->privacy_screen_sw_state_property);
- drm_sysfs_connector_status_event(connector,
- connector->privacy_screen_hw_state_property);
+ drm_sysfs_connector_property_event(connector,
+ connector->privacy_screen_sw_state_property);
+ drm_sysfs_connector_property_event(connector,
+ connector->privacy_screen_hw_state_property);
return NOTIFY_DONE;
}
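
With the reworked helpers above, drivers pass a bitmask of the colorimetry modes they can actually signal, and the property is built only from those entries (plus Default). A hedged sketch of a connector init path; the particular BIT() values and the function name are examples, not taken from the patch:

#include <drm/drm_connector.h>
#include <linux/bits.h>

static int demo_attach_colorspace(struct drm_connector *connector)
{
        /* expose only the colorimetry modes the hardware can signal */
        u32 supported = BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
                        BIT(DRM_MODE_COLORIMETRY_BT2020_YCC) |
                        BIT(DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65);
        int ret;

        /* passing 0 instead would fall back to the full HDMI set */
        ret = drm_mode_create_hdmi_colorspace_property(connector, supported);
        if (ret)
                return ret;

        return drm_connector_attach_colorspace_property(connector);
}
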
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 4855230ba2c6..2de43ff3ce0a 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
@@ -39,6 +40,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
+#include <drm/drm_gpuva_mgr.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -175,6 +177,45 @@ static const struct file_operations drm_debugfs_fops = {
.release = single_release,
};
+/**
+ * drm_debugfs_gpuva_info - dump the given DRM GPU VA space
+ * @m: pointer to the &seq_file to write
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ *
+ * Dumps the GPU VA mappings of a given DRM GPU VA manager.
+ *
+ * For each DRM GPU VA space drivers should call this function from their
+ * &drm_info_list's show callback.
+ *
+ * Returns: 0 on success, -ENODEV if the &mgr is not initialized
+ */
+int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuva_manager *mgr)
+{
+ struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node;
+
+ if (!mgr->name)
+ return -ENODEV;
+
+ seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
+ mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range);
+ seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
+ kva->va.addr, kva->va.addr + kva->va.range);
+ seq_puts(m, "\n");
+ seq_puts(m, " VAs | start | range | end | object | object offset\n");
+ seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
+ drm_gpuva_for_each_va(va, mgr) {
+ if (unlikely(va == kva))
+ continue;
+
+ seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx | 0x%016llx\n",
+ va->va.addr, va->va.range, va->va.addr + va->va.range,
+ (u64)(uintptr_t)va->gem.obj, va->gem.offset);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_gpuva_info);
/**
* drm_debugfs_create_files - Initialize a given set of debugfs files for DRM
@@ -234,6 +275,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
if (drm_drv_uses_atomic_modeset(dev)) {
drm_atomic_debugfs_init(minor);
+ drm_bridge_debugfs_init(minor);
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
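
drm_debugfs_gpuva_info() added above is meant to be called from a driver's show callback registered through a &drm_info_list. A hedged sketch, where the demo_device container and its single GPU VA manager are assumptions for illustration:

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_gpuva_mgr.h>
#include <linux/seq_file.h>

struct demo_device {
        struct drm_device drm;
        struct drm_gpuva_manager gpuva_mgr;    /* hypothetical single VA space */
};

static int demo_gpuva_show(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct demo_device *ddev =
                container_of(node->minor->dev, struct demo_device, drm);

        /* dump the VA space; returns -ENODEV if the manager is not set up */
        return drm_debugfs_gpuva_info(m, &ddev->gpuva_mgr);
}

static const struct drm_info_list demo_debugfs_list[] = {
        { "gpuvas", demo_gpuva_show, 0 },
};
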
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 12687dd9e1ac..3eda026ffac6 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -84,7 +84,7 @@ DEFINE_STATIC_SRCU(drm_unplug_srcu);
*/
static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
- unsigned int type)
+ enum drm_minor_type type)
{
switch (type) {
case DRM_MINOR_PRIMARY:
@@ -116,7 +116,7 @@ static void drm_minor_alloc_release(struct drm_device *dev, void *data)
}
}
-static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
unsigned long flags;
@@ -160,7 +160,7 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
return 0;
}
-static int drm_minor_register(struct drm_device *dev, unsigned int type)
+static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
unsigned long flags;
@@ -203,7 +203,7 @@ err_debugfs:
return ret;
}
-static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
+static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
struct drm_minor *minor;
unsigned long flags;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e0dbd9140726..340da8257b51 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -230,6 +230,7 @@ static const struct edid_quirk {
/* OSVR HDK and HDK2 VR Headsets */
EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'U', 'O', 0x1111, EDID_QUIRK_NON_DESKTOP),
};
/*
@@ -3456,6 +3457,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
connector->base.id, connector->name);
return NULL;
}
+ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
+ connector->base.id, connector->name);
+ }
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3502,27 +3507,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
- switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
- case DRM_EDID_PT_ANALOG_CSYNC:
- case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
- connector->base.id, connector->name);
- mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
- break;
- case DRM_EDID_PT_DIGITAL_CSYNC:
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
- connector->base.id, connector->name);
- mode->flags |= DRM_MODE_FLAG_CSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
- break;
- case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
- break;
- }
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
}
set_size:
@@ -3962,7 +3950,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
const struct cvt_timing *cvt;
- const int rates[] = { 60, 85, 75, 60, 50 };
+ static const int rates[] = { 60, 85, 75, 60, 50 };
const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
new file mode 100644
index 000000000000..ff69cf0fb42a
--- /dev/null
+++ b/drivers/gpu/drm/drm_exec.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <linux/dma-resv.h>
+
+/**
+ * DOC: Overview
+ *
+ * This component mainly abstracts the retry loop necessary for locking
+ * multiple GEM objects while preparing hardware operations (e.g. command
+ * submissions, page table updates etc..).
+ *
+ * If a contention is detected while locking a GEM object the cleanup procedure
+ * unlocks all previously locked GEM objects and locks the contended one first
+ * before locking any further objects.
+ *
+ * After an object is locked fences slots can optionally be reserved on the
+ * dma_resv object inside the GEM object.
+ *
+ * A typical usage pattern should look like this::
+ *
+ * struct drm_gem_object *obj;
+ * struct drm_exec exec;
+ * unsigned long index;
+ * int ret;
+ *
+ * drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ * drm_exec_until_all_locked(&exec) {
+ * ret = drm_exec_prepare_obj(&exec, boA, 1);
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * goto error;
+ *
+ * ret = drm_exec_prepare_obj(&exec, boB, 1);
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * goto error;
+ * }
+ *
+ * drm_exec_for_each_locked_object(&exec, index, obj) {
+ * dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_READ);
+ * ...
+ * }
+ * drm_exec_fini(&exec);
+ *
+ * See struct drm_exec for more details.
+ */
+
+/* Dummy value used to initially enter the retry loop */
+#define DRM_EXEC_DUMMY ((void *)~0)
+
+/* Unlock all objects and drop references */
+static void drm_exec_unlock_all(struct drm_exec *exec)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ dma_resv_unlock(obj->resv);
+ drm_gem_object_put(obj);
+ }
+
+ drm_gem_object_put(exec->prelocked);
+ exec->prelocked = NULL;
+}
+
+/**
+ * drm_exec_init - initialize a drm_exec object
+ * @exec: the drm_exec object to initialize
+ * @flags: controls locking behavior, see DRM_EXEC_* defines
+ *
+ * Initialize the object and make sure that we can track locked objects.
+ */
+void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+{
+ exec->flags = flags;
+ exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ /* If allocation here fails, just delay that till the first use */
+ exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+ exec->num_objects = 0;
+ exec->contended = DRM_EXEC_DUMMY;
+ exec->prelocked = NULL;
+}
+EXPORT_SYMBOL(drm_exec_init);
+
+/**
+ * drm_exec_fini - finalize a drm_exec object
+ * @exec: the drm_exec object to finalize
+ *
+ * Unlock all locked objects, drop the references to objects and free all memory
+ * used for tracking the state.
+ */
+void drm_exec_fini(struct drm_exec *exec)
+{
+ drm_exec_unlock_all(exec);
+ kvfree(exec->objects);
+ if (exec->contended != DRM_EXEC_DUMMY) {
+ drm_gem_object_put(exec->contended);
+ ww_acquire_fini(&exec->ticket);
+ }
+}
+EXPORT_SYMBOL(drm_exec_fini);
+
+/**
+ * drm_exec_cleanup - cleanup when contention is detected
+ * @exec: the drm_exec object to cleanup
+ *
+ * Cleanup the current state and return true if we should stay inside the retry
+ * loop, false if there wasn't any contention detected and we can keep the
+ * objects locked.
+ */
+bool drm_exec_cleanup(struct drm_exec *exec)
+{
+ if (likely(!exec->contended)) {
+ ww_acquire_done(&exec->ticket);
+ return false;
+ }
+
+ if (likely(exec->contended == DRM_EXEC_DUMMY)) {
+ exec->contended = NULL;
+ ww_acquire_init(&exec->ticket, &reservation_ww_class);
+ return true;
+ }
+
+ drm_exec_unlock_all(exec);
+ exec->num_objects = 0;
+ return true;
+}
+EXPORT_SYMBOL(drm_exec_cleanup);
+
+/* Track the locked object in the array */
+static int drm_exec_obj_locked(struct drm_exec *exec,
+ struct drm_gem_object *obj)
+{
+ if (unlikely(exec->num_objects == exec->max_objects)) {
+ size_t size = exec->max_objects * sizeof(void *);
+ void *tmp;
+
+ tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE,
+ GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ exec->objects = tmp;
+ exec->max_objects += PAGE_SIZE / sizeof(void *);
+ }
+ drm_gem_object_get(obj);
+ exec->objects[exec->num_objects++] = obj;
+
+ return 0;
+}
+
+/* Make sure the contended object is locked first */
+static int drm_exec_lock_contended(struct drm_exec *exec)
+{
+ struct drm_gem_object *obj = exec->contended;
+ int ret;
+
+ if (likely(!obj))
+ return 0;
+
+ /* Always cleanup the contention so that error handling can kick in */
+ exec->contended = NULL;
+ if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT) {
+ ret = dma_resv_lock_slow_interruptible(obj->resv,
+ &exec->ticket);
+ if (unlikely(ret))
+ goto error_dropref;
+ } else {
+ dma_resv_lock_slow(obj->resv, &exec->ticket);
+ }
+
+ ret = drm_exec_obj_locked(exec, obj);
+ if (unlikely(ret))
+ goto error_unlock;
+
+ exec->prelocked = obj;
+ return 0;
+
+error_unlock:
+ dma_resv_unlock(obj->resv);
+
+error_dropref:
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+/**
+ * drm_exec_lock_obj - lock a GEM object for use
+ * @exec: the drm_exec object with the state
+ * @obj: the GEM object to lock
+ *
+ * Lock a GEM object for use and grab a reference to it.
+ *
+ * Returns: -EDEADLK if a contention is detected, -EALREADY when object is
+ * already locked (can be suppressed by setting the DRM_EXEC_IGNORE_DUPLICATES
+ * flag), -ENOMEM when memory allocation failed and zero for success.
+ */
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_exec_lock_contended(exec);
+ if (unlikely(ret))
+ return ret;
+
+ if (exec->prelocked == obj) {
+ drm_gem_object_put(exec->prelocked);
+ exec->prelocked = NULL;
+ return 0;
+ }
+
+ if (exec->flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
+ ret = dma_resv_lock_interruptible(obj->resv, &exec->ticket);
+ else
+ ret = dma_resv_lock(obj->resv, &exec->ticket);
+
+ if (unlikely(ret == -EDEADLK)) {
+ drm_gem_object_get(obj);
+ exec->contended = obj;
+ return -EDEADLK;
+ }
+
+ if (unlikely(ret == -EALREADY) &&
+ exec->flags & DRM_EXEC_IGNORE_DUPLICATES)
+ return 0;
+
+ if (unlikely(ret))
+ return ret;
+
+ ret = drm_exec_obj_locked(exec, obj);
+ if (ret)
+ goto error_unlock;
+
+ return 0;
+
+error_unlock:
+ dma_resv_unlock(obj->resv);
+ return ret;
+}
+EXPORT_SYMBOL(drm_exec_lock_obj);
+
+/**
+ * drm_exec_unlock_obj - unlock a GEM object in this exec context
+ * @exec: the drm_exec object with the state
+ * @obj: the GEM object to unlock
+ *
+ * Unlock the GEM object and remove it from the collection of locked objects.
+ * Should only be used to unlock the most recently locked objects. It's not time
+ * efficient to unlock objects locked long ago.
+ */
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj)
+{
+ unsigned int i;
+
+ for (i = exec->num_objects; i--;) {
+ if (exec->objects[i] == obj) {
+ dma_resv_unlock(obj->resv);
+ for (++i; i < exec->num_objects; ++i)
+ exec->objects[i - 1] = exec->objects[i];
+ --exec->num_objects;
+ drm_gem_object_put(obj);
+ return;
+ }
+
+ }
+}
+EXPORT_SYMBOL(drm_exec_unlock_obj);
+
+/**
+ * drm_exec_prepare_obj - prepare a GEM object for use
+ * @exec: the drm_exec object with the state
+ * @obj: the GEM object to prepare
+ * @num_fences: how many fences to reserve
+ *
+ * Prepare a GEM object for use by locking it and reserving fence slots.
+ *
+ * Returns: -EDEADLK if a contention is detected, -EALREADY when object is
+ * already locked, -ENOMEM when memory allocation failed and zero for success.
+ */
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences)
+{
+ int ret;
+
+ ret = drm_exec_lock_obj(exec, obj);
+ if (ret)
+ return ret;
+
+ ret = dma_resv_reserve_fences(obj->resv, num_fences);
+ if (ret) {
+ drm_exec_unlock_obj(exec, obj);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_exec_prepare_obj);
+
+/**
+ * drm_exec_prepare_array - helper to prepare an array of objects
+ * @exec: the drm_exec object with the state
+ * @objects: array of GEM object to prepare
+ * @num_objects: number of GEM objects in the array
+ * @num_fences: number of fences to reserve on each GEM object
+ *
+ * Prepares all GEM objects in an array, aborts on first error.
+ * Reserves @num_fences on each GEM object after locking it.
+ *
+ * Returns: -EDEADLK on contention, -EALREADY when an object is already locked,
+ * -ENOMEM when memory allocation failed and zero for success.
+ */
+int drm_exec_prepare_array(struct drm_exec *exec,
+ struct drm_gem_object **objects,
+ unsigned int num_objects,
+ unsigned int num_fences)
+{
+ int ret;
+
+ for (unsigned int i = 0; i < num_objects; ++i) {
+ ret = drm_exec_prepare_obj(exec, objects[i], num_fences);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_exec_prepare_array);
+
+MODULE_DESCRIPTION("DRM execution context");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 9978147bbc8a..61a5d450cc20 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1187,17 +1187,19 @@ static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
}
}
-static void __fill_var(struct fb_var_screeninfo *var,
+static void __fill_var(struct fb_var_screeninfo *var, struct fb_info *info,
struct drm_framebuffer *fb)
{
int i;
var->xres_virtual = fb->width;
var->yres_virtual = fb->height;
- var->accel_flags = FB_ACCELF_TEXT;
+ var->accel_flags = 0;
var->bits_per_pixel = drm_format_info_bpp(fb->format, 0);
- var->height = var->width = 0;
+ var->height = info->var.height;
+ var->width = info->var.width;
+
var->left_margin = var->right_margin = 0;
var->upper_margin = var->lower_margin = 0;
var->hsync_len = var->vsync_len = 0;
@@ -1260,7 +1262,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- __fill_var(var, fb);
+ __fill_var(var, info, fb);
/*
* fb_pan_display() validates this, but fb_set_par() doesn't and just
@@ -1716,7 +1718,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xoffset = 0;
info->var.yoffset = 0;
- __fill_var(&info->var, fb);
+ __fill_var(&info->var, info, fb);
info->var.activate = FB_ACTIVATE_NOW;
drm_fb_helper_fill_pixel_fmt(&info->var, format);
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index d86773fa8ab0..6c9427bb4053 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -54,21 +54,17 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
-
- if (drm_WARN_ON_ONCE(dev, !fb_helper->dev->driver->gem_prime_mmap))
- return -ENODEV;
- return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+ return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}
static const struct fb_ops drm_fbdev_dma_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_SYS_OPS_RDWR,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_SYS_OPS_DRAW,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_mmap = drm_fbdev_dma_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
@@ -127,7 +123,6 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_dma_fb_ops;
- info->flags = FBINFO_DEFAULT;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
@@ -217,7 +212,7 @@ static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
* drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
+ * 32 is used if this is zero.
*
* This function sets up fbdev emulation for GEM DMA drivers that support
* dumb buffers with a virtual address and that can be mmap'ed.
@@ -252,10 +247,6 @@ void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
goto err_drm_client_init;
}
- ret = drm_fbdev_dma_client_hotplug(&fb_helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&fb_helper->client);
return;
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index 98ae703848a0..d647d89764cb 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -34,9 +34,9 @@ static int drm_fbdev_generic_fb_release(struct fb_info *info, int user)
return 0;
}
-FB_GEN_DEFAULT_DEFERRED_SYS_OPS(drm_fbdev_generic,
- drm_fb_helper_damage_range,
- drm_fb_helper_damage_area);
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(drm_fbdev_generic,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area);
static void drm_fbdev_generic_fb_destroy(struct fb_info *info)
{
@@ -109,7 +109,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_generic_fb_ops;
- info->flags = FBINFO_DEFAULT;
/* screen */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
@@ -339,10 +338,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
goto err_drm_client_init;
}
- ret = drm_fbdev_generic_client_hotplug(&fb_helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&fb_helper->client);
return;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 1a5a2cd0d4ec..6129b89bb366 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -164,6 +164,9 @@ void drm_gem_private_object_init(struct drm_device *dev,
if (!obj->resv)
obj->resv = &obj->_resv;
+ if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
+ drm_gem_gpuva_init(obj);
+
drm_vma_node_reset(&obj->vma_node);
INIT_LIST_HEAD(&obj->lru_node);
}
@@ -496,13 +499,13 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
+ * Move folios to appropriate lru and release the folios, decrementing the
+ * ref count of those folios.
*/
-static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
- check_move_unevictable_pages(pvec);
- __pagevec_release(pvec);
+ check_move_unevictable_folios(fbatch);
+ __folio_batch_release(fbatch);
cond_resched();
}
@@ -534,10 +537,10 @@ static void drm_gem_check_release_pagevec(struct pagevec *pvec)
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
struct address_space *mapping;
- struct page *p, **pages;
- struct pagevec pvec;
- int i, npages;
-
+ struct page **pages;
+ struct folio *folio;
+ struct folio_batch fbatch;
+ int i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@@ -559,11 +562,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
mapping_set_unevictable(mapping);
- for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(p))
+ i = 0;
+ while (i < npages) {
+ folio = shmem_read_folio_gfp(mapping, i,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
goto fail;
- pages[i] = p;
+ for (j = 0; j < folio_nr_pages(folio); j++, i++)
+ pages[i] = folio_file_page(folio, i);
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
* correct region during swapin. Note that this requires
@@ -571,23 +577,26 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
* so shmem can relocate pages during swapin if required.
*/
BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
- (page_to_pfn(p) >= 0x00100000UL));
+ (folio_pfn(folio) >= 0x00100000UL));
}
return pages;
fail:
mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
- while (i--) {
- if (!pagevec_add(&pvec, pages[i]))
- drm_gem_check_release_pagevec(&pvec);
+ folio_batch_init(&fbatch);
+ j = 0;
+ while (j < i) {
+ struct folio *f = page_folio(pages[j]);
+ if (!folio_batch_add(&fbatch, f))
+ drm_gem_check_release_batch(&fbatch);
+ j += folio_nr_pages(f);
}
- if (pagevec_count(&pvec))
- drm_gem_check_release_pagevec(&pvec);
+ if (fbatch.nr)
+ drm_gem_check_release_batch(&fbatch);
kvfree(pages);
- return ERR_CAST(p);
+ return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);
@@ -603,7 +612,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
{
int i, npages;
struct address_space *mapping;
- struct pagevec pvec;
+ struct folio_batch fbatch;
mapping = file_inode(obj->filp)->i_mapping;
mapping_clear_unevictable(mapping);
@@ -616,23 +625,27 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
npages = obj->size >> PAGE_SHIFT;
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
for (i = 0; i < npages; i++) {
+ struct folio *folio;
+
if (!pages[i])
continue;
+ folio = page_folio(pages[i]);
if (dirty)
- set_page_dirty(pages[i]);
+ folio_mark_dirty(folio);
if (accessed)
- mark_page_accessed(pages[i]);
+ folio_mark_accessed(folio);
/* Undo the reference we took when populating the table */
- if (!pagevec_add(&pvec, pages[i]))
- drm_gem_check_release_pagevec(&pvec);
+ if (!folio_batch_add(&fbatch, folio))
+ drm_gem_check_release_batch(&fbatch);
+ i += folio_nr_pages(folio) - 1;
}
- if (pagevec_count(&pvec))
- drm_gem_check_release_pagevec(&pvec);
+ if (folio_batch_count(&fbatch))
+ drm_gem_check_release_batch(&fbatch);
kvfree(pages);
}
@@ -1150,8 +1163,8 @@ int drm_gem_pin(struct drm_gem_object *obj)
{
if (obj->funcs->pin)
return obj->funcs->pin(obj);
- else
- return 0;
+
+ return 0;
}
void drm_gem_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index b8a615a138cd..3bdb6ba37ff4 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -168,8 +168,8 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev,
if (drm_drv_uses_atomic_modeset(dev) &&
!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
- drm_dbg(dev, "Unsupported pixel format %p4cc / modifier 0x%llx\n",
- &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ drm_dbg_kms(dev, "Unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 4ea6507a77e5..e435f986cd13 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -88,8 +88,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (ret)
goto err_release;
- mutex_init(&shmem->pages_lock);
- mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list);
if (!private) {
@@ -141,11 +139,13 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
-
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
+ dma_resv_lock(shmem->base.resv, NULL);
+
+ drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
DMA_BIDIRECTIONAL, 0);
@@ -154,22 +154,24 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
if (shmem->pages)
drm_gem_shmem_put_pages(shmem);
- }
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, shmem->pages_use_count);
+
+ dma_resv_unlock(shmem->base.resv);
+ }
drm_gem_object_release(obj);
- mutex_destroy(&shmem->pages_lock);
- mutex_destroy(&shmem->vmap_lock);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
+ dma_resv_assert_held(shmem->base.resv);
+
if (shmem->pages_use_count++ > 0)
return 0;
@@ -197,35 +199,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
}
/*
- * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
- * This function makes sure that backing pages exists for the shmem GEM object
- * and increases the use count.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function decreases the use count and puts the backing pages when use drops to zero.
*/
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- int ret;
- drm_WARN_ON(obj->dev, obj->import_attach);
-
- ret = mutex_lock_interruptible(&shmem->pages_lock);
- if (ret)
- return ret;
- ret = drm_gem_shmem_get_pages_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_get_pages);
-
-static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
-{
- struct drm_gem_object *obj = &shmem->base;
+ dma_resv_assert_held(shmem->base.resv);
if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return;
@@ -243,20 +226,25 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages_mark_accessed_on_put);
shmem->pages = NULL;
}
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
-/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
- * @shmem: shmem GEM object
- *
- * This function decreases the use count and puts the backing pages when use drops to zero.
- */
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
+{
+ int ret;
+
+ dma_resv_assert_held(shmem->base.resv);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+
+ return ret;
+}
+
+static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
- mutex_lock(&shmem->pages_lock);
- drm_gem_shmem_put_pages_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_assert_held(shmem->base.resv);
+
+ drm_gem_shmem_put_pages(shmem);
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
/**
* drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
@@ -271,10 +259,17 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
+ int ret;
drm_WARN_ON(obj->dev, obj->import_attach);
- return drm_gem_shmem_get_pages(shmem);
+ ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_pin_locked(shmem);
+ dma_resv_unlock(shmem->base.resv);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
@@ -291,12 +286,29 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, obj->import_attach);
- drm_gem_shmem_put_pages(shmem);
+ dma_resv_lock(shmem->base.resv, NULL);
+ drm_gem_shmem_unpin_locked(shmem);
+ dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
-static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
+ * store.
+ *
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object. It hides the differences
+ * between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
@@ -312,6 +324,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
} else {
pgprot_t prot = PAGE_KERNEL;
+ dma_resv_assert_held(shmem->base.resv);
+
if (shmem->vmap_use_count++ > 0) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
@@ -346,45 +360,30 @@ err_zero_use:
return ret;
}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
- * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
- * store.
- *
- * This function makes sure that a contiguous kernel virtual address mapping
- * exists for the buffer backing the shmem GEM object. It hides the differences
- * between dma-buf imported and natively allocated objects.
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
*
- * Returns:
- * 0 on success or a negative error code on failure.
+ * This function hides the differences between dma-buf imported and natively
+ * allocated objects.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&shmem->vmap_lock);
- if (ret)
- return ret;
- ret = drm_gem_shmem_vmap_locked(shmem, map);
- mutex_unlock(&shmem->vmap_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
-
-static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
+ dma_resv_assert_held(shmem->base.resv);
+
if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
return;
@@ -397,26 +396,6 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
shmem->vaddr = NULL;
}
-
-/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
- * @shmem: shmem GEM object
- * @map: Kernel virtual address where the SHMEM GEM object was mapped
- *
- * This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
- *
- * This function hides the differences between dma-buf imported and natively
- * allocated objects.
- */
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
-{
- mutex_lock(&shmem->vmap_lock);
- drm_gem_shmem_vunmap_locked(shmem, map);
- mutex_unlock(&shmem->vmap_lock);
-}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
static int
@@ -447,24 +426,24 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
*/
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
- mutex_lock(&shmem->pages_lock);
+ dma_resv_assert_held(shmem->base.resv);
if (shmem->madv >= 0)
shmem->madv = madv;
madv = shmem->madv;
- mutex_unlock(&shmem->pages_lock);
-
return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
+ dma_resv_assert_held(shmem->base.resv);
+
drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
@@ -472,7 +451,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages_locked(shmem);
+ drm_gem_shmem_put_pages(shmem);
shmem->madv = -1;
@@ -488,17 +467,6 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
-
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
-{
- if (!mutex_trylock(&shmem->pages_lock))
- return false;
- drm_gem_shmem_purge_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
-
- return true;
-}
EXPORT_SYMBOL(drm_gem_shmem_purge);
/**
@@ -551,7 +519,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- mutex_lock(&shmem->pages_lock);
+ dma_resv_lock(shmem->base.resv, NULL);
if (page_offset >= num_pages ||
drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
@@ -563,7 +531,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
return ret;
}
@@ -575,7 +543,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
drm_WARN_ON(obj->dev, obj->import_attach);
- mutex_lock(&shmem->pages_lock);
+ dma_resv_lock(shmem->base.resv, NULL);
/*
* We should have already pinned the pages when the buffer was first
@@ -585,7 +553,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++;
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
drm_gem_vm_open(vma);
}
@@ -595,7 +563,10 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_put_pages(shmem);
+ dma_resv_unlock(shmem->base.resv);
+
drm_gem_vm_close(vma);
}
@@ -623,7 +594,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
int ret;
if (obj->import_attach) {
+ /* Reset both vm_ops and vm_private_data, so we don't end up with
+ * vm_ops pointing to our implementation if the dma-buf backend
+ * doesn't set those fields.
+ */
vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+
ret = dma_buf_mmap(obj->dma_buf, vma, 0);
/* Drop the reference drm_gem_mmap_obj() acquired.*/
@@ -633,7 +610,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
+ dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
+ dma_resv_unlock(shmem->base.resv);
+
if (ret)
return ret;
@@ -699,7 +679,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, obj->import_attach);
- ret = drm_gem_shmem_get_pages_locked(shmem);
+ ret = drm_gem_shmem_get_pages(shmem);
if (ret)
return ERR_PTR(ret);
@@ -721,7 +701,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages_locked(shmem);
+ drm_gem_shmem_put_pages(shmem);
return ERR_PTR(ret);
}
@@ -746,11 +726,11 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
int ret;
struct sg_table *sgt;
- ret = mutex_lock_interruptible(&shmem->pages_lock);
+ ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ERR_PTR(ret);
sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
return sgt;
}
diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuva_mgr.c
new file mode 100644
index 000000000000..f86bfad74ff8
--- /dev/null
+++ b/drivers/gpu/drm/drm_gpuva_mgr.c
@@ -0,0 +1,1725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Danilo Krummrich <[email protected]>
+ *
+ */
+
+#include <drm/drm_gpuva_mgr.h>
+
+#include <linux/interval_tree_generic.h>
+#include <linux/mm.h>
+
+/**
+ * DOC: Overview
+ *
+ * The DRM GPU VA Manager, represented by struct drm_gpuva_manager, keeps track
+ * of a GPU's virtual address (VA) space and manages the corresponding virtual
+ * mappings represented by &drm_gpuva objects. It also keeps track of the
+ * mapping's backing &drm_gem_object buffers.
+ *
+ * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
+ * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
+ *
+ * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
+ * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
+ *
+ * The GPU VA manager internally uses a rb-tree to manage the
+ * &drm_gpuva mappings within a GPU's virtual address space.
+ *
+ * The &drm_gpuva_manager contains a special &drm_gpuva representing the
+ * portion of VA space reserved by the kernel. This node is initialized together
+ * with the GPU VA manager instance and removed when the GPU VA manager is
+ * destroyed.
+ *
+ * In a typical application drivers would embed struct drm_gpuva_manager and
+ * struct drm_gpuva within their own driver specific structures; this way the
+ * manager performs no memory allocations of its own, nor any allocations of
+ * &drm_gpuva entries.
+ *
+ * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
+ * are contained within struct drm_gpuva already. Hence, for inserting
+ * &drm_gpuva entries from within dma-fence signalling critical sections it is
+ * enough to pre-allocate the &drm_gpuva structures.
+ */
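A minimal sketch of the embedding described above; struct driver_gpu_vm, struct
driver_gpuva and their members are hypothetical and only illustrate how a driver
might wrap the manager and its mappings without any extra allocations::

    #include <linux/mutex.h>
    #include <linux/types.h>

    #include <drm/drm_gpuva_mgr.h>

    struct driver_gpu_vm {
            struct drm_gpuva_manager base;  /* embedded GPU VA manager */
            struct mutex vm_lock;           /* driver specific VM lock */
    };

    struct driver_gpuva {
            struct drm_gpuva base;          /* embedded mapping node */
            u32 pte_flags;                  /* driver specific per-mapping state */
    };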
+
+/**
+ * DOC: Split and Merge
+ *
+ * Besides its capability to manage and represent a GPU VA space, the
+ * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
+ * calculate a sequence of operations to satisfy a given map or unmap request.
+ *
+ * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
+ * and merging of existent GPU VA mappings with the ones that are requested to
+ * be mapped or unmapped. This feature is required by the Vulkan API to
+ * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
+ * as VM BIND.
+ *
+ * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
+ * containing map, unmap and remap operations for a given newly requested
+ * mapping. The sequence of callbacks represents the set of operations to
+ * execute in order to integrate the new mapping cleanly into the current state
+ * of the GPU VA space.
+ *
+ * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
+ * amount of unmap operations, a maximum of two remap operations and a single
+ * map operation. The caller might receive no callback at all if no operation is
+ * required, e.g. if the requested mapping already exists in the exact same way.
+ *
+ * The single map operation represents the original map operation requested by
+ * the caller.
+ *
+ * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
+ * &drm_gpuva to unmap is physically contiguous with the original mapping
+ * request. Optionally, if 'keep' is set, drivers may keep the actual page table
+ * entries for this &drm_gpuva, adding only the missing page table entries and
+ * updating the &drm_gpuva_manager's view of things accordingly.
+ *
+ * Drivers may do the same optimization, namely delta page table updates, also
+ * for remap operations. This is possible since &drm_gpuva_op_remap consists of
+ * one unmap operation and one or two map operations, such that drivers can
+ * derive the page table update delta accordingly.
+ *
+ * Note that there can't be more than two existent mappings to split up, one at
+ * the beginning and one at the end of the new mapping, hence there is a
+ * maximum of two remap operations.
+ *
+ * Analogous to drm_gpuva_sm_map(), drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
+ * to call back into the driver in order to unmap a range of GPU VA space. The
+ * logic behind this function is much simpler though: For all existent mappings
+ * enclosed by the given range, unmap operations are created. For mappings which
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
+ *
+ * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
+ * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
+ * to directly obtain an instance of struct drm_gpuva_ops containing a list of
+ * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
+ * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
+ * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
+ * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
+ * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
+ * allocations are possible (e.g. to allocate GPU page tables) and once in the
+ * dma-fence signalling critical path.
+ *
+ * To update the &drm_gpuva_manager's view of the GPU VA space
+ * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
+ * safely be used from &drm_gpuva_fn_ops callbacks originating from
+ * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
+ * convenient to use the provided helper functions drm_gpuva_map(),
+ * drm_gpuva_remap() and drm_gpuva_unmap() instead.
+ *
+ * The following diagram depicts the basic relationships of existent GPU VA
+ * mappings, a newly requested mapping and the resulting mappings as implemented
+ * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
+ *
+ * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
+ * could be kept.
+ *
+ * ::
+ *
+ * 0 a 1
+ * old: |-----------| (bo_offset=n)
+ *
+ * 0 a 1
+ * req: |-----------| (bo_offset=n)
+ *
+ * 0 a 1
+ * new: |-----------| (bo_offset=n)
+ *
+ *
+ * 2) Requested mapping is identical, except for the BO offset, hence replace
+ * the mapping.
+ *
+ * ::
+ *
+ * 0 a 1
+ * old: |-----------| (bo_offset=n)
+ *
+ * 0 a 1
+ * req: |-----------| (bo_offset=m)
+ *
+ * 0 a 1
+ * new: |-----------| (bo_offset=m)
+ *
+ *
+ * 3) Requested mapping is identical, except for the backing BO, hence replace
+ * the mapping.
+ *
+ * ::
+ *
+ * 0 a 1
+ * old: |-----------| (bo_offset=n)
+ *
+ * 0 b 1
+ * req: |-----------| (bo_offset=n)
+ *
+ * 0 b 1
+ * new: |-----------| (bo_offset=n)
+ *
+ *
+ * 4) Existent mapping is a left aligned subset of the requested one, hence
+ * replace the existent one.
+ *
+ * ::
+ *
+ * 0 a 1
+ * old: |-----| (bo_offset=n)
+ *
+ * 0 a 2
+ * req: |-----------| (bo_offset=n)
+ *
+ * 0 a 2
+ * new: |-----------| (bo_offset=n)
+ *
+ * .. note::
+ * We expect to see the same result for a request with a different BO
+ * and/or non-contiguous BO offset.
+ *
+ *
+ * 5) Requested mapping's range is a left aligned subset of the existent one,
+ * but backed by a different BO. Hence, map the requested mapping and split
+ * the existent one adjusting its BO offset.
+ *
+ * ::
+ *
+ * 0 a 2
+ * old: |-----------| (bo_offset=n)
+ *
+ * 0 b 1
+ * req: |-----| (bo_offset=n)
+ *
+ * 0 b 1 a' 2
+ * new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
+ *
+ * .. note::
+ * We expect to see the same result for a request with a different BO
+ * and/or non-contiguous BO offset.
+ *
+ *
+ * 6) Existent mapping is a superset of the requested mapping. Split it up, but
+ * indicate that the backing PTEs could be kept.
+ *
+ * ::
+ *
+ * 0 a 2
+ * old: |-----------| (bo_offset=n)
+ *
+ * 0 a 1
+ * req: |-----| (bo_offset=n)
+ *
+ * 0 a 1 a' 2
+ * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
+ *
+ *
+ * 7) Requested mapping's range is a right aligned subset of the existent one,
+ * but backed by a different BO. Hence, map the requested mapping and split
+ * the existent one, without adjusting the BO offset.
+ *
+ * ::
+ *
+ * 0 a 2
+ * old: |-----------| (bo_offset=n)
+ *
+ * 1 b 2
+ * req: |-----| (bo_offset=m)
+ *
+ * 0 a 1 b 2
+ * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
+ *
+ *
+ * 8) Existent mapping is a superset of the requested mapping. Split it up, but
+ * indicate that the backing PTEs could be kept.
+ *
+ * ::
+ *
+ * 0 a 2
+ * old: |-----------| (bo_offset=n)
+ *
+ * 1 a 2
+ * req: |-----| (bo_offset=n+1)
+ *
+ * 0 a' 1 a 2
+ * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
+ *
+ *
+ * 9) Existent mapping is overlapped at the end by the requested mapping backed
+ * by a different BO. Hence, map the requested mapping and split up the
+ * existent one, without adjusting the BO offset.
+ *
+ * ::
+ *
+ * 0 a 2
+ * old: |-----------| (bo_offset=n)
+ *
+ * 1 b 3
+ * req: |-----------| (bo_offset=m)
+ *
+ * 0 a 1 b 3
+ * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
+ *
+ *
+ * 10) Existent mapping is overlapped by the requested mapping, both having the
+ * same backing BO with a contiguous offset. Indicate the backing PTEs of
+ * the old mapping could be kept.
+ *
+ * ::
+ *
+ * 0 a 2
+ * old: |-----------| (bo_offset=n)
+ *
+ * 1 a 3
+ * req: |-----------| (bo_offset=n+1)
+ *
+ * 0 a' 1 a 3
+ * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
+ *
+ *
+ * 11) Requested mapping's range is a centered subset of the existent one
+ * having a different backing BO. Hence, map the requested mapping and split
+ * up the existent one in two mappings, adjusting the BO offset of the right
+ * one accordingly.
+ *
+ * ::
+ *
+ * 0 a 3
+ * old: |-----------------| (bo_offset=n)
+ *
+ * 1 b 2
+ * req: |-----| (bo_offset=m)
+ *
+ * 0 a 1 b 2 a' 3
+ * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
+ *
+ *
+ * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ * up, but indicate that the backing PTEs could be kept.
+ *
+ * ::
+ *
+ * 0 a 3
+ * old: |-----------------| (bo_offset=n)
+ *
+ * 1 a 2
+ * req: |-----| (bo_offset=n+1)
+ *
+ * 0 a' 1 a 2 a'' 3
+ * new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
+ *
+ *
+ * 13) Existent mapping is a right aligned subset of the requested one, hence
+ * replace the existent one.
+ *
+ * ::
+ *
+ * 1 a 2
+ * old: |-----| (bo_offset=n+1)
+ *
+ * 0 a 2
+ * req: |-----------| (bo_offset=n)
+ *
+ * 0 a 2
+ * new: |-----------| (bo_offset=n)
+ *
+ * .. note::
+ * We expect to see the same result for a request with a different bo
+ * and/or non-contiguous bo_offset.
+ *
+ *
+ * 14) Existent mapping is a centered subset of the requested one, hence
+ * replace the existent one.
+ *
+ * ::
+ *
+ * 1 a 2
+ * old: |-----| (bo_offset=n+1)
+ *
+ * 0 a 3
+ * req: |----------------| (bo_offset=n)
+ *
+ * 0 a 3
+ * new: |----------------| (bo_offset=n)
+ *
+ * .. note::
+ * We expect to see the same result for a request with a different bo
+ * and/or non-contiguous bo_offset.
+ *
+ *
+ * 15) Existent mapping is overlapped at the beginning by the requested mapping
+ * backed by a different BO. Hence, map the requested mapping and split up
+ * the existent one, adjusting its BO offset accordingly.
+ *
+ * ::
+ *
+ * 1 a 3
+ * old: |-----------| (bo_offset=n)
+ *
+ * 0 b 2
+ * req: |-----------| (bo_offset=m)
+ *
+ * 0 b 2 a' 3
+ * new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+1)
+ */
+
+/**
+ * DOC: Locking
+ *
+ * Generally, the GPU VA manager does not take care of locking itself, it is
+ * the drivers responsibility to take care about locking. Drivers might want to
+ * protect the following operations: inserting, removing and iterating
+ * &drm_gpuva objects as well as generating all kinds of operations, such as
+ * split / merge or prefetch.
+ *
+ * The GPU VA manager also does not take care of the locking of the backing
+ * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for
+ * enforcing mutual exclusion using either the GEM's dma_resv lock or,
+ * alternatively, a driver specific external lock. For the latter see also
+ * drm_gem_gpuva_set_lock().
+ *
+ * However, the GPU VA manager contains lockdep checks to ensure callers of its
+ * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is
+ * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
+ */
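A rough sketch of the external-lock case mentioned above, reusing the
hypothetical struct driver_gpu_vm from the earlier sketch; the
drm_gem_gpuva_set_lock() helper referenced above is assumed to take the GEM
object and the external lock::

    /* Register the driver's external lock once per GEM object, so that the
     * lockdep checks in drm_gpuva_link()/drm_gpuva_unlink() know about it.
     */
    static void driver_gem_register_vm_lock(struct drm_gem_object *obj,
                                            struct driver_gpu_vm *vm)
    {
            drm_gem_gpuva_set_lock(obj, &vm->vm_lock);
    }

    static void driver_va_link(struct driver_gpu_vm *vm, struct drm_gpuva *va)
    {
            mutex_lock(&vm->vm_lock);
            drm_gpuva_link(va);     /* GEM GPUVA list access under the lock */
            mutex_unlock(&vm->vm_lock);
    }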
+
+/**
+ * DOC: Examples
+ *
+ * This section gives two examples on how to let the DRM GPUVA Manager generate
+ * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
+ * make use of them.
+ *
+ * The code below is strictly limited to illustrating the generic usage pattern.
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
+ * any other helpers or error handling in terms of freeing memory and dropping
+ * previously taken locks.
+ *
+ * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
+ *
+ * // Allocates a new &drm_gpuva.
+ * struct drm_gpuva * driver_gpuva_alloc(void);
+ *
+ * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
+ * // structure in individual driver structures and lock the dma-resv with
+ * // drm_exec or similar helpers.
+ * int driver_mapping_create(struct drm_gpuva_manager *mgr,
+ * u64 addr, u64 range,
+ * struct drm_gem_object *obj, u64 offset)
+ * {
+ * struct drm_gpuva_ops *ops;
+ * struct drm_gpuva_op *op;
+ *
+ * driver_lock_va_space();
+ * ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
+ * obj, offset);
+ * if (IS_ERR(ops))
+ * return PTR_ERR(ops);
+ *
+ * drm_gpuva_for_each_op(op, ops) {
+ * struct drm_gpuva *va;
+ *
+ * switch (op->op) {
+ * case DRM_GPUVA_OP_MAP:
+ * va = driver_gpuva_alloc();
+ * if (!va)
+ * ; // unwind previous VA space updates,
+ * // free memory and unlock
+ *
+ * driver_vm_map();
+ * drm_gpuva_map(mgr, va, &op->map);
+ * drm_gpuva_link(va);
+ *
+ * break;
+ * case DRM_GPUVA_OP_REMAP: {
+ * struct drm_gpuva *prev = NULL, *next = NULL;
+ *
+ * va = op->remap.unmap->va;
+ *
+ * if (op->remap.prev) {
+ * prev = driver_gpuva_alloc();
+ * if (!prev)
+ * ; // unwind previous VA space
+ * // updates, free memory and
+ * // unlock
+ * }
+ *
+ * if (op->remap.next) {
+ * next = driver_gpuva_alloc();
+ * if (!next)
+ * ; // unwind previous VA space
+ * // updates, free memory and
+ * // unlock
+ * }
+ *
+ * driver_vm_remap();
+ * drm_gpuva_remap(prev, next, &op->remap);
+ *
+ * drm_gpuva_unlink(va);
+ * if (prev)
+ * drm_gpuva_link(prev);
+ * if (next)
+ * drm_gpuva_link(next);
+ *
+ * break;
+ * }
+ * case DRM_GPUVA_OP_UNMAP:
+ * va = op->unmap.va;
+ *
+ * driver_vm_unmap();
+ * drm_gpuva_unlink(va);
+ * drm_gpuva_unmap(&op->unmap);
+ *
+ * break;
+ * default:
+ * break;
+ * }
+ * }
+ * driver_unlock_va_space();
+ *
+ * return 0;
+ * }
+ *
+ * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
+ *
+ * struct driver_context {
+ * struct drm_gpuva_manager *mgr;
+ * struct drm_gpuva *new_va;
+ * struct drm_gpuva *prev_va;
+ * struct drm_gpuva *next_va;
+ * };
+ *
+ * // ops to pass to drm_gpuva_manager_init()
+ * static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
+ * .sm_step_map = driver_gpuva_map,
+ * .sm_step_remap = driver_gpuva_remap,
+ * .sm_step_unmap = driver_gpuva_unmap,
+ * };
+ *
+ * // Typically drivers would embed the &drm_gpuva_manager and &drm_gpuva
+ * // structure in individual driver structures and lock the dma-resv with
+ * // drm_exec or similar helpers.
+ * int driver_mapping_create(struct drm_gpuva_manager *mgr,
+ * u64 addr, u64 range,
+ * struct drm_gem_object *obj, u64 offset)
+ * {
+ * struct driver_context ctx;
+ * struct drm_gpuva_ops *ops;
+ * struct drm_gpuva_op *op;
+ * int ret = 0;
+ *
+ * ctx.mgr = mgr;
+ *
+ * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
+ * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
+ * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
+ * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
+ * ret = -ENOMEM;
+ * goto out;
+ * }
+ *
+ * driver_lock_va_space();
+ * ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
+ * driver_unlock_va_space();
+ *
+ * out:
+ * kfree(ctx.new_va);
+ * kfree(ctx.prev_va);
+ * kfree(ctx.next_va);
+ * return ret;
+ * }
+ *
+ * int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
+ * {
+ * struct driver_context *ctx = __ctx;
+ *
+ * drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
+ *
+ * drm_gpuva_link(ctx->new_va);
+ *
+ * // prevent the new GPUVA from being freed in
+ * // driver_mapping_create()
+ * ctx->new_va = NULL;
+ *
+ * return 0;
+ * }
+ *
+ * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
+ * {
+ * struct driver_context *ctx = __ctx;
+ *
+ * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
+ *
+ * drm_gpuva_unlink(op->remap.unmap->va);
+ * kfree(op->remap.unmap->va);
+ *
+ * if (op->remap.prev) {
+ * drm_gpuva_link(ctx->prev_va);
+ * ctx->prev_va = NULL;
+ * }
+ *
+ * if (op->remap.next) {
+ * drm_gpuva_link(ctx->next_va);
+ * ctx->next_va = NULL;
+ * }
+ *
+ * return 0;
+ * }
+ *
+ * int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
+ * {
+ * drm_gpuva_unlink(op->unmap.va);
+ * drm_gpuva_unmap(&op->unmap);
+ * kfree(op->unmap.va);
+ *
+ * return 0;
+ * }
+ */
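The examples above only cover mapping; the following sketch shows the
corresponding unmap path using pre-allocated operations, under the same
simplifications. driver_lock_va_space(), driver_unlock_va_space() and
driver_vm_unmap() are the illustrative hooks from example 1; remap handling
and freeing of the driver's &drm_gpuva containers are elided::

    int driver_mapping_destroy(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
    {
            struct drm_gpuva_ops *ops;
            struct drm_gpuva_op *op;
            int ret = 0;

            driver_lock_va_space();

            ops = drm_gpuva_sm_unmap_ops_create(mgr, addr, range);
            if (IS_ERR(ops)) {
                    ret = PTR_ERR(ops);
                    goto out_unlock;
            }

            drm_gpuva_for_each_op(op, ops) {
                    if (op->op != DRM_GPUVA_OP_UNMAP)
                            continue;       /* remap handling elided */

                    driver_vm_unmap();
                    drm_gpuva_unlink(op->unmap.va);
                    drm_gpuva_unmap(&op->unmap);
            }

            drm_gpuva_ops_free(mgr, ops);

    out_unlock:
            driver_unlock_va_space();
            return ret;
    }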
+
+#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
+
+#define GPUVA_START(node) ((node)->va.addr)
+#define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
+
+/* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain
+ * about this.
+ */
+INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
+ GPUVA_START, GPUVA_LAST, static __maybe_unused,
+ drm_gpuva_it)
+
+static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva *va);
+static void __drm_gpuva_remove(struct drm_gpuva *va);
+
+static bool
+drm_gpuva_check_overflow(u64 addr, u64 range)
+{
+ u64 end;
+
+ return WARN(check_add_overflow(addr, range, &end),
+ "GPUVA address limited to %zu bytes.\n", sizeof(end));
+}
+
+static bool
+drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+{
+ u64 end = addr + range;
+ u64 mm_start = mgr->mm_start;
+ u64 mm_end = mm_start + mgr->mm_range;
+
+ return addr >= mm_start && end <= mm_end;
+}
+
+static bool
+drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+{
+ u64 end = addr + range;
+ u64 kstart = mgr->kernel_alloc_node.va.addr;
+ u64 krange = mgr->kernel_alloc_node.va.range;
+ u64 kend = kstart + krange;
+
+ return krange && addr < kend && kstart < end;
+}
+
+static bool
+drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range)
+{
+ return !drm_gpuva_check_overflow(addr, range) &&
+ drm_gpuva_in_mm_range(mgr, addr, range) &&
+ !drm_gpuva_in_kernel_node(mgr, addr, range);
+}
+
+/**
+ * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
+ * @mgr: pointer to the &drm_gpuva_manager to initialize
+ * @name: the name of the GPU VA space
+ * @start_offset: the start offset of the GPU VA space
+ * @range: the size of the GPU VA space
+ * @reserve_offset: the start of the kernel reserved GPU VA area
+ * @reserve_range: the size of the kernel reserved GPU VA area
+ * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
+ *
+ * The &drm_gpuva_manager must be initialized with this function before use.
+ *
+ * Note that @mgr must be cleared to 0 before calling this function. The given
+ * &name is expected to be managed by the surrounding driver structures.
+ */
+void
+drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
+ const char *name,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuva_fn_ops *ops)
+{
+ mgr->rb.tree = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&mgr->rb.list);
+
+ drm_gpuva_check_overflow(start_offset, range);
+ mgr->mm_start = start_offset;
+ mgr->mm_range = range;
+
+ mgr->name = name ? name : "unknown";
+ mgr->ops = ops;
+
+ memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+
+ if (reserve_range) {
+ mgr->kernel_alloc_node.va.addr = reserve_offset;
+ mgr->kernel_alloc_node.va.range = reserve_range;
+
+ if (likely(!drm_gpuva_check_overflow(reserve_offset,
+ reserve_range)))
+ __drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);
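For reference, a typical initialization might look as follows; the VA range,
the reserved node and the driver_gpuva_ops table (as defined in the examples
above) are illustrative values rather than requirements::

    /* Manage a 48-bit VA space and reserve the first page for the kernel. */
    static void driver_vm_setup(struct drm_gpuva_manager *mgr)
    {
            drm_gpuva_manager_init(mgr, "example-vm",
                                   0, 1ull << 48,       /* start, range */
                                   0, 0x1000,           /* reserved node */
                                   &driver_gpuva_ops);
    }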
+
+/**
+ * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
+ * @mgr: pointer to the &drm_gpuva_manager to clean up
+ *
+ * Note that it is a bug to call this function on a manager that still
+ * holds GPU VA mappings.
+ */
+void
+drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
+{
+ mgr->name = NULL;
+
+ if (mgr->kernel_alloc_node.va.range)
+ __drm_gpuva_remove(&mgr->kernel_alloc_node);
+
+ WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
+ "GPUVA tree is not empty, potentially leaking memory.");
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);
+
+static int
+__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva *va)
+{
+ struct rb_node *node;
+ struct list_head *head;
+
+ if (drm_gpuva_it_iter_first(&mgr->rb.tree,
+ GPUVA_START(va),
+ GPUVA_LAST(va)))
+ return -EEXIST;
+
+ va->mgr = mgr;
+
+ drm_gpuva_it_insert(va, &mgr->rb.tree);
+
+ node = rb_prev(&va->rb.node);
+ if (node)
+ head = &(to_drm_gpuva(node))->rb.entry;
+ else
+ head = &mgr->rb.list;
+
+ list_add(&va->rb.entry, head);
+
+ return 0;
+}
+
+/**
+ * drm_gpuva_insert() - insert a &drm_gpuva
+ * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
+ * @va: the &drm_gpuva to insert
+ *
+ * Insert a &drm_gpuva with a given address and range into a
+ * &drm_gpuva_manager.
+ *
+ * It is safe to use this function while iterating the GPU VA space with the
+ * safe iterator variants, such as drm_gpuva_for_each_va_safe() and
+ * drm_gpuva_for_each_va_range_safe().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva *va)
+{
+ u64 addr = va->va.addr;
+ u64 range = va->va.range;
+
+ if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
+ return -EINVAL;
+
+ return __drm_gpuva_insert(mgr, va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_insert);
+
+static void
+__drm_gpuva_remove(struct drm_gpuva *va)
+{
+ drm_gpuva_it_remove(va, &va->mgr->rb.tree);
+ list_del_init(&va->rb.entry);
+}
+
+/**
+ * drm_gpuva_remove() - remove a &drm_gpuva
+ * @va: the &drm_gpuva to remove
+ *
+ * This removes the given &va from the underlying tree.
+ *
+ * It is safe to use this function while iterating the GPU VA space with the
+ * safe iterator variants, such as drm_gpuva_for_each_va_safe() and
+ * drm_gpuva_for_each_va_range_safe().
+ */
+void
+drm_gpuva_remove(struct drm_gpuva *va)
+{
+ struct drm_gpuva_manager *mgr = va->mgr;
+
+ if (unlikely(va == &mgr->kernel_alloc_node)) {
+ WARN(1, "Can't destroy kernel reserved node.\n");
+ return;
+ }
+
+ __drm_gpuva_remove(va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_remove);
+
+/**
+ * drm_gpuva_link() - link a &drm_gpuva
+ * @va: the &drm_gpuva to link
+ *
+ * This adds the given &va to the GPU VA list of the &drm_gem_object it is
+ * associated with.
+ *
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using the GEMs dma_resv lock.
+ */
+void
+drm_gpuva_link(struct drm_gpuva *va)
+{
+ struct drm_gem_object *obj = va->gem.obj;
+
+ if (unlikely(!obj))
+ return;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+
+ list_add_tail(&va->gem.entry, &obj->gpuva.list);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_link);
+
+/**
+ * drm_gpuva_unlink() - unlink a &drm_gpuva
+ * @va: the &drm_gpuva to unlink
+ *
+ * This removes the given &va from the GPU VA list of the &drm_gem_object it is
+ * associated with.
+ *
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using the GEMs dma_resv lock.
+ */
+void
+drm_gpuva_unlink(struct drm_gpuva *va)
+{
+ struct drm_gem_object *obj = va->gem.obj;
+
+ if (unlikely(!obj))
+ return;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+
+ list_del_init(&va->gem.entry);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
+
+/**
+ * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
+ * @mgr: the &drm_gpuva_manager to search in
+ * @addr: the &drm_gpuva's address
+ * @range: the &drm_gpuva's range
+ *
+ * Returns: the first &drm_gpuva within the given range
+ */
+struct drm_gpuva *
+drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range)
+{
+ u64 last = addr + range - 1;
+
+ return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
+
+/**
+ * drm_gpuva_find() - find a &drm_gpuva
+ * @mgr: the &drm_gpuva_manager to search in
+ * @addr: the &drm_gpuva's address
+ * @range: the &drm_gpuva's range
+ *
+ * Returns: the &drm_gpuva at a given &addr and with a given &range
+ */
+struct drm_gpuva *
+drm_gpuva_find(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range)
+{
+ struct drm_gpuva *va;
+
+ va = drm_gpuva_find_first(mgr, addr, range);
+ if (!va)
+ goto out;
+
+ if (va->va.addr != addr ||
+ va->va.range != range)
+ goto out;
+
+ return va;
+
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find);
+
+/**
+ * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
+ * @mgr: the &drm_gpuva_manager to search in
+ * @start: the given GPU VA's start address
+ *
+ * Find the adjacent &drm_gpuva before the GPU VA with given &start address.
+ *
+ * Note that if there is any free space between the GPU VA mappings no mapping
+ * is returned.
+ *
+ * Returns: a pointer to the found &drm_gpuva or NULL if none was found
+ */
+struct drm_gpuva *
+drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
+{
+ if (!drm_gpuva_range_valid(mgr, start - 1, 1))
+ return NULL;
+
+ return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
+
+/**
+ * drm_gpuva_find_next() - find the &drm_gpuva after the given address
+ * @mgr: the &drm_gpuva_manager to search in
+ * @end: the given GPU VA's end address
+ *
+ * Find the adjacent &drm_gpuva after the GPU VA with given &end address.
+ *
+ * Note that if there is any free space between the GPU VA mappings no mapping
+ * is returned.
+ *
+ * Returns: a pointer to the found &drm_gpuva or NULL if none was found
+ */
+struct drm_gpuva *
+drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
+{
+ if (!drm_gpuva_range_valid(mgr, end, 1))
+ return NULL;
+
+ return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
+
+/**
+ * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
+ * is empty
+ * @mgr: the &drm_gpuva_manager to check the range for
+ * @addr: the start address of the range
+ * @range: the range of the interval
+ *
+ * Returns: true if the interval is empty, false otherwise
+ */
+bool
+drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+{
+ return !drm_gpuva_find_first(mgr, addr, range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);
+
+/**
+ * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
+ * &drm_gpuva_op_map
+ * @mgr: the &drm_gpuva_manager
+ * @va: the &drm_gpuva to insert
+ * @op: the &drm_gpuva_op_map to initialize @va with
+ *
+ * Initializes the @va from the @op and inserts it into the given @mgr.
+ */
+void
+drm_gpuva_map(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op)
+{
+ drm_gpuva_init_from_op(va, op);
+ drm_gpuva_insert(mgr, va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_map);
+
+/**
+ * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
+ * &drm_gpuva_op_remap
+ * @prev: the &drm_gpuva to remap when keeping the start of a mapping
+ * @next: the &drm_gpuva to remap when keeping the end of a mapping
+ * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
+ *
+ * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
+ * @next.
+ */
+void
+drm_gpuva_remap(struct drm_gpuva *prev,
+ struct drm_gpuva *next,
+ struct drm_gpuva_op_remap *op)
+{
+ struct drm_gpuva *curr = op->unmap->va;
+ struct drm_gpuva_manager *mgr = curr->mgr;
+
+ drm_gpuva_remove(curr);
+
+ if (op->prev) {
+ drm_gpuva_init_from_op(prev, op->prev);
+ drm_gpuva_insert(mgr, prev);
+ }
+
+ if (op->next) {
+ drm_gpuva_init_from_op(next, op->next);
+ drm_gpuva_insert(mgr, next);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_remap);
+
+/**
+ * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
+ * &drm_gpuva_op_unmap
+ * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
+ *
+ * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
+ */
+void
+drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
+{
+ drm_gpuva_remove(op->va);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
+
+static int
+op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset)
+{
+ struct drm_gpuva_op op = {};
+
+ op.op = DRM_GPUVA_OP_MAP;
+ op.map.va.addr = addr;
+ op.map.va.range = range;
+ op.map.gem.obj = obj;
+ op.map.gem.offset = offset;
+
+ return fn->sm_step_map(&op, priv);
+}
+
+static int
+op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+ struct drm_gpuva_op_map *prev,
+ struct drm_gpuva_op_map *next,
+ struct drm_gpuva_op_unmap *unmap)
+{
+ struct drm_gpuva_op op = {};
+ struct drm_gpuva_op_remap *r;
+
+ op.op = DRM_GPUVA_OP_REMAP;
+ r = &op.remap;
+ r->prev = prev;
+ r->next = next;
+ r->unmap = unmap;
+
+ return fn->sm_step_remap(&op, priv);
+}
+
+static int
+op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+ struct drm_gpuva *va, bool merge)
+{
+ struct drm_gpuva_op op = {};
+
+ op.op = DRM_GPUVA_OP_UNMAP;
+ op.unmap.va = va;
+ op.unmap.keep = merge;
+
+ return fn->sm_step_unmap(&op, priv);
+}
+
+static int
+__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
+ const struct drm_gpuva_fn_ops *ops, void *priv,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ struct drm_gpuva *va, *next, *prev = NULL;
+ u64 req_end = req_addr + req_range;
+ int ret;
+
+ if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+ return -EINVAL;
+
+ drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+ struct drm_gem_object *obj = va->gem.obj;
+ u64 offset = va->gem.offset;
+ u64 addr = va->va.addr;
+ u64 range = va->va.range;
+ u64 end = addr + range;
+ bool merge = !!va->gem.obj;
+
+ if (addr == req_addr) {
+ merge &= obj == req_obj &&
+ offset == req_offset;
+
+ if (end == req_end) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (end < req_end) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ goto next;
+ }
+
+ if (end > req_end) {
+ struct drm_gpuva_op_map n = {
+ .va.addr = req_end,
+ .va.range = range - req_range,
+ .gem.obj = obj,
+ .gem.offset = offset + req_range,
+ };
+ struct drm_gpuva_op_unmap u = {
+ .va = va,
+ .keep = merge,
+ };
+
+ ret = op_remap_cb(ops, priv, NULL, &n, &u);
+ if (ret)
+ return ret;
+ break;
+ }
+ } else if (addr < req_addr) {
+ u64 ls_range = req_addr - addr;
+ struct drm_gpuva_op_map p = {
+ .va.addr = addr,
+ .va.range = ls_range,
+ .gem.obj = obj,
+ .gem.offset = offset,
+ };
+ struct drm_gpuva_op_unmap u = { .va = va };
+
+ merge &= obj == req_obj &&
+ offset + ls_range == req_offset;
+ u.keep = merge;
+
+ if (end == req_end) {
+ ret = op_remap_cb(ops, priv, &p, NULL, &u);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (end < req_end) {
+ ret = op_remap_cb(ops, priv, &p, NULL, &u);
+ if (ret)
+ return ret;
+ goto next;
+ }
+
+ if (end > req_end) {
+ struct drm_gpuva_op_map n = {
+ .va.addr = req_end,
+ .va.range = end - req_end,
+ .gem.obj = obj,
+ .gem.offset = offset + ls_range +
+ req_range,
+ };
+
+ ret = op_remap_cb(ops, priv, &p, &n, &u);
+ if (ret)
+ return ret;
+ break;
+ }
+ } else if (addr > req_addr) {
+ merge &= obj == req_obj &&
+ offset == req_offset +
+ (addr - req_addr);
+
+ if (end == req_end) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ if (end < req_end) {
+ ret = op_unmap_cb(ops, priv, va, merge);
+ if (ret)
+ return ret;
+ goto next;
+ }
+
+ if (end > req_end) {
+ struct drm_gpuva_op_map n = {
+ .va.addr = req_end,
+ .va.range = end - req_end,
+ .gem.obj = obj,
+ .gem.offset = offset + req_end - addr,
+ };
+ struct drm_gpuva_op_unmap u = {
+ .va = va,
+ .keep = merge,
+ };
+
+ ret = op_remap_cb(ops, priv, NULL, &n, &u);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+next:
+ prev = va;
+ }
+
+ return op_map_cb(ops, priv,
+ req_addr, req_range,
+ req_obj, req_offset);
+}
+
+static int
+__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
+ const struct drm_gpuva_fn_ops *ops, void *priv,
+ u64 req_addr, u64 req_range)
+{
+ struct drm_gpuva *va, *next;
+ u64 req_end = req_addr + req_range;
+ int ret;
+
+ if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+ return -EINVAL;
+
+ drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+ struct drm_gpuva_op_map prev = {}, next = {};
+ bool prev_split = false, next_split = false;
+ struct drm_gem_object *obj = va->gem.obj;
+ u64 offset = va->gem.offset;
+ u64 addr = va->va.addr;
+ u64 range = va->va.range;
+ u64 end = addr + range;
+
+ if (addr < req_addr) {
+ prev.va.addr = addr;
+ prev.va.range = req_addr - addr;
+ prev.gem.obj = obj;
+ prev.gem.offset = offset;
+
+ prev_split = true;
+ }
+
+ if (end > req_end) {
+ next.va.addr = req_end;
+ next.va.range = end - req_end;
+ next.gem.obj = obj;
+ next.gem.offset = offset + (req_end - addr);
+
+ next_split = true;
+ }
+
+ if (prev_split || next_split) {
+ struct drm_gpuva_op_unmap unmap = { .va = va };
+
+ ret = op_remap_cb(ops, priv,
+ prev_split ? &prev : NULL,
+ next_split ? &next : NULL,
+ &unmap);
+ if (ret)
+ return ret;
+ } else {
+ ret = op_unmap_cb(ops, priv, va, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @priv: pointer to a driver private data structure
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function iterates the given range of the GPU VA space. It utilizes the
+ * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
+ * steps.
+ *
+ * Drivers may use these callbacks to update the GPU VA space right away within
+ * the callback. In case the driver decides to copy and store the operations for
+ * later processing neither this function nor &drm_gpuva_sm_unmap is allowed to
+ * be called before the &drm_gpuva_manager's view of the GPU VA space was
+ * updated with the previous set of operations. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_remove() and/or the drm_gpuva_map(), drm_gpuva_remap() and
+ * drm_gpuva_unmap() helpers should be used.
+ *
+ * A sequence of callbacks can contain map, unmap and remap operations, but
+ * the sequence of callbacks might also be empty if no operation is required,
+ * e.g. if the requested mapping already exists in the exact same way.
+ *
+ * There can be an arbitrary amount of unmap operations, a maximum of two remap
+ * operations and a single map operation. The latter one represents the original
+ * map operation requested by the caller.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ const struct drm_gpuva_fn_ops *ops = mgr->ops;
+
+ if (unlikely(!(ops && ops->sm_step_map &&
+ ops->sm_step_remap &&
+ ops->sm_step_unmap)))
+ return -EINVAL;
+
+ return __drm_gpuva_sm_map(mgr, ops, priv,
+ req_addr, req_range,
+ req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
+
+/**
+ * drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @priv: pointer to a driver private data structure
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function iterates the given range of the GPU VA space. It utilizes the
+ * &drm_gpuva_fn_ops to call back into the driver providing the operations to
+ * unmap and, if required, split existent mappings.
+ *
+ * Drivers may use these callbacks to update the GPU VA space right away within
+ * the callback. In case the driver decides to copy and store the operations for
+ * later processing neither this function nor &drm_gpuva_sm_map is allowed to be
+ * called before the &drm_gpuva_manager's view of the GPU VA space was updated
+ * with the previous set of operations. To update the &drm_gpuva_manager's view
+ * of the GPU VA space drm_gpuva_insert(), drm_gpuva_remove() and/or the
+ * drm_gpuva_map(), drm_gpuva_remap() and drm_gpuva_unmap() helpers should
+ * be used.
+ *
+ * A sequence of callbacks can contain unmap and remap operations, depending on
+ * whether there are actual overlapping mappings to split.
+ *
+ * There can be an arbitrary amount of unmap operations and a maximum of two
+ * remap operations.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+ u64 req_addr, u64 req_range)
+{
+ const struct drm_gpuva_fn_ops *ops = mgr->ops;
+
+ if (unlikely(!(ops && ops->sm_step_remap &&
+ ops->sm_step_unmap)))
+ return -EINVAL;
+
+ return __drm_gpuva_sm_unmap(mgr, ops, priv,
+ req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
+
+static struct drm_gpuva_op *
+gpuva_op_alloc(struct drm_gpuva_manager *mgr)
+{
+ const struct drm_gpuva_fn_ops *fn = mgr->ops;
+ struct drm_gpuva_op *op;
+
+ if (fn && fn->op_alloc)
+ op = fn->op_alloc();
+ else
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+
+ if (unlikely(!op))
+ return NULL;
+
+ return op;
+}
+
+static void
+gpuva_op_free(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva_op *op)
+{
+ const struct drm_gpuva_fn_ops *fn = mgr->ops;
+
+ if (fn && fn->op_free)
+ fn->op_free(op);
+ else
+ kfree(op);
+}
+
+static int
+drm_gpuva_sm_step(struct drm_gpuva_op *__op,
+ void *priv)
+{
+ struct {
+ struct drm_gpuva_manager *mgr;
+ struct drm_gpuva_ops *ops;
+ } *args = priv;
+ struct drm_gpuva_manager *mgr = args->mgr;
+ struct drm_gpuva_ops *ops = args->ops;
+ struct drm_gpuva_op *op;
+
+ op = gpuva_op_alloc(mgr);
+ if (unlikely(!op))
+ goto err;
+
+ memcpy(op, __op, sizeof(*op));
+
+ if (op->op == DRM_GPUVA_OP_REMAP) {
+ struct drm_gpuva_op_remap *__r = &__op->remap;
+ struct drm_gpuva_op_remap *r = &op->remap;
+
+ r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
+ GFP_KERNEL);
+ if (unlikely(!r->unmap))
+ goto err_free_op;
+
+ if (__r->prev) {
+ r->prev = kmemdup(__r->prev, sizeof(*r->prev),
+ GFP_KERNEL);
+ if (unlikely(!r->prev))
+ goto err_free_unmap;
+ }
+
+ if (__r->next) {
+ r->next = kmemdup(__r->next, sizeof(*r->next),
+ GFP_KERNEL);
+ if (unlikely(!r->next))
+ goto err_free_prev;
+ }
+ }
+
+ list_add_tail(&op->entry, &ops->list);
+
+ return 0;
+
+err_free_prev:
+ kfree(op->remap.prev);
+err_free_unmap:
+ kfree(op->remap.unmap);
+err_free_op:
+ gpuva_op_free(mgr, op);
+err:
+ return -ENOMEM;
+}
+
+static const struct drm_gpuva_fn_ops gpuva_list_ops = {
+ .sm_step_map = drm_gpuva_sm_step,
+ .sm_step_remap = drm_gpuva_sm_step,
+ .sm_step_unmap = drm_gpuva_sm_step,
+};
+
+/**
+ * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function creates a list of operations to perform splitting and merging
+ * of existent mapping(s) with the newly requested one.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map, unmap and remap operations, but it
+ * can also be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There can be an arbitrary amount of unmap operations, a maximum of two remap
+ * operations and a single map operation. The latter one represents the original
+ * map operation requested by the caller.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuva_manager *mgr;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.mgr = mgr;
+ args.ops = ops;
+
+ ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
+ req_addr, req_range,
+ req_obj, req_offset);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(mgr, ops);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
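
/*
 * Illustrative sketch only, not part of this patch: fetching the ops list up
 * front, processing it in order and releasing it. struct my_vm and
 * my_vm_process_op() are hypothetical; drm_gpuva_for_each_op() is the
 * iterator referenced in the kerneldoc above.
 */
static int my_vm_bind_deferred(struct my_vm *vm, u64 addr, u64 range,
                               struct drm_gem_object *obj, u64 offset)
{
        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *op;
        int ret = 0;

        ops = drm_gpuva_sm_map_ops_create(&vm->mgr, addr, range, obj, offset);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        drm_gpuva_for_each_op(op, ops) {
                ret = my_vm_process_op(vm, op);
                if (ret)
                        break;
        }

        drm_gpuva_ops_free(&vm->mgr, ops);

        return ret;
}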
+
+/**
+ * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
+ * unmap
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function creates a list of operations to perform unmapping and, if
+ * required, splitting of the mappings overlapping the unmap range.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain unmap and remap operations, depending on
+ * whether there are actual overlapping mappings to split.
+ *
+ * There can be an arbitrary amount of unmap operations and a maximum of two
+ * remap operations.
+ *
+ * Note that before calling this function again with another range to unmap it
+ * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
+ * previously obtained operations must be processed or abandoned. To update the
+ * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+ u64 req_addr, u64 req_range)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuva_manager *mgr;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.mgr = mgr;
+ args.ops = ops;
+
+ ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
+ req_addr, req_range);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(mgr, ops);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
+
+/**
+ * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @addr: the start address of the range to prefetch
+ * @range: the range of the mappings to prefetch
+ *
+ * This function creates a list of operations to perform prefetching.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain prefetch operations.
+ *
+ * There can be an arbitrary amount of prefetch operations.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range)
+{
+ struct drm_gpuva_ops *ops;
+ struct drm_gpuva_op *op;
+ struct drm_gpuva *va;
+ u64 end = addr + range;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ drm_gpuva_for_each_va_range(va, mgr, addr, end) {
+ op = gpuva_op_alloc(mgr);
+ if (!op) {
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ op->op = DRM_GPUVA_OP_PREFETCH;
+ op->prefetch.va = va;
+ list_add_tail(&op->entry, &ops->list);
+ }
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(mgr, ops);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
+
+/**
+ * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
+ * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @obj: the &drm_gem_object to unmap
+ *
+ * This function creates a list of operations to perform unmapping for every
+ * GPUVA attached to a GEM.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and consists of an
+ * arbitrary amount of unmap operations.
+ *
+ * After the caller has finished processing the returned &drm_gpuva_ops, they
+ * must be freed with &drm_gpuva_ops_free.
+ *
+ * It is the caller's responsibility to protect the GEM's GPUVA list against
+ * concurrent access using the GEM's dma_resv lock.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuva_ops *ops;
+ struct drm_gpuva_op *op;
+ struct drm_gpuva *va;
+ int ret;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ drm_gem_for_each_gpuva(va, obj) {
+ op = gpuva_op_alloc(mgr);
+ if (!op) {
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ op->op = DRM_GPUVA_OP_UNMAP;
+ op->unmap.va = va;
+ list_add_tail(&op->entry, &ops->list);
+ }
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(mgr, ops);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
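
/*
 * Illustrative sketch only, not part of this patch: tearing down all
 * mappings of a GEM object, e.g. from a driver's gem_close path. The GEM's
 * dma_resv lock protects its GPUVA list as required above; struct my_vm and
 * my_vm_unmap_va() are hypothetical.
 */
static int my_vm_unmap_gem(struct my_vm *vm, struct drm_gem_object *obj)
{
        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *op;
        int ret;

        ret = dma_resv_lock(obj->resv, NULL);
        if (ret)
                return ret;

        ops = drm_gpuva_gem_unmap_ops_create(&vm->mgr, obj);
        if (IS_ERR(ops)) {
                dma_resv_unlock(obj->resv);
                return PTR_ERR(ops);
        }

        drm_gpuva_for_each_op(op, ops)
                my_vm_unmap_va(vm, op->unmap.va);       /* hypothetical */

        dma_resv_unlock(obj->resv);
        drm_gpuva_ops_free(&vm->mgr, ops);

        return 0;
}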
+
+/**
+ * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
+ * @mgr: the &drm_gpuva_manager the ops were created for
+ * @ops: the &drm_gpuva_ops to free
+ *
+ * Frees the given &drm_gpuva_ops structure including all the ops associated
+ * with it.
+ */
+void
+drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *op, *next;
+
+ drm_gpuva_for_each_op_safe(op, next, ops) {
+ list_del(&op->entry);
+
+ if (op->op == DRM_GPUVA_OP_REMAP) {
+ kfree(op->remap.prev);
+ kfree(op->remap.next);
+ kfree(op->remap.unmap);
+ }
+
+ gpuva_op_free(mgr, op);
+ }
+
+ kfree(ops);
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index d7e023bbb0d5..ba12acd55139 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -245,6 +245,8 @@ int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7c9d66ee917d..f03ffbacfe9b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -245,8 +245,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
req->value = 1;
return 0;
case DRM_CAP_PRIME:
- req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
- req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ req->value = DRM_PRIME_CAP_IMPORT | DRM_PRIME_CAP_EXPORT;
return 0;
case DRM_CAP_SYNCOBJ:
req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
@@ -702,6 +701,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl,
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 4cf214de50c4..bcd111404b12 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -49,10 +49,10 @@ struct drmres {
* Some archs want to perform DMA into kmalloc caches
* and need a guaranteed alignment larger than
* the alignment of a 64-bit integer.
- * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
- * buffer alignment as if it was allocated by plain kmalloc().
+ * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
+ * alignment for struct drmres when allocated by kmalloc().
*/
- u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+ u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
static void free_dr(struct drmres *dr)
@@ -196,7 +196,7 @@ void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
size, gfp);
return NULL;
}
- dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+ dr->node.name = kstrdup_const("kmalloc", gfp);
add_dr(dev, dr);
@@ -264,28 +264,10 @@ void drmm_kfree(struct drm_device *dev, void *data)
}
EXPORT_SYMBOL(drmm_kfree);
-static void drmm_mutex_release(struct drm_device *dev, void *res)
+void __drmm_mutex_release(struct drm_device *dev, void *res)
{
struct mutex *lock = res;
mutex_destroy(lock);
}
-
-/**
- * drmm_mutex_init - &drm_device-managed mutex_init()
- * @dev: DRM device
- * @lock: lock to be initialized
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
- *
- * This is a &drm_device-managed version of mutex_init(). The initialized
- * lock is automatically destroyed on the final drm_dev_put().
- */
-int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
-{
- mutex_init(lock);
-
- return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
-}
-EXPORT_SYMBOL(drmm_mutex_init);
+EXPORT_SYMBOL(__drmm_mutex_release);
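
/*
 * Illustrative sketch only, not part of this patch: driver usage stays the
 * same; drmm_mutex_init() is assumed to now be a drm_managed.h macro that
 * runs mutex_init() at the call site and registers the exported
 * __drmm_mutex_release() cleanup action.
 */
struct my_priv {
        struct mutex lock;      /* hypothetical driver state lock */
};

static int my_priv_init(struct drm_device *drm, struct my_priv *priv)
{
        /* The mutex is destroyed automatically on the final drm_dev_put(). */
        return drmm_mutex_init(drm, &priv->lock);
}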
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index c871d9f096b8..e90f0bf895b3 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -1140,10 +1140,13 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
return -ENOMEM;
tr[1].rx_buf = buf;
+
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 0);
spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
- ret = spi_sync(spi, &m);
+ ret = spi_sync_locked(spi, &m);
+ spi_bus_unlock(spi->controller);
if (ret)
goto err_free;
@@ -1177,19 +1180,24 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *dbi, u8 *cmd,
MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
+ spi_bus_unlock(spi->controller);
if (ret || !num)
return ret;
if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !dbi->swap_bytes)
bpw = 16;
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+ spi_bus_unlock(spi->controller);
- return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+ return ret;
}
/**
@@ -1271,7 +1279,8 @@ EXPORT_SYMBOL(mipi_dbi_spi_init);
* @len: Buffer length
*
* This SPI transfer helper breaks up the transfer of @buf into chunks which
- * the SPI controller driver can handle.
+ * the SPI controller driver can handle. The SPI bus must be locked when
+ * calling this.
*
* Returns:
* Zero on success, negative error code on failure.
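
/*
 * Illustrative sketch only, not part of this patch: a caller honouring the
 * new locking rule by taking the SPI bus lock around the chunked transfer,
 * mirroring mipi_dbi_typec3_command() above. my_dbi_write_data() and its
 * parameters are hypothetical.
 */
static int my_dbi_write_data(struct mipi_dbi *dbi, u32 speed_hz,
                             const void *buf, size_t len)
{
        struct spi_device *spi = dbi->spi;
        int ret;

        spi_bus_lock(spi->controller);
        gpiod_set_value_cansleep(dbi->dc, 1);   /* D/C high: data, not command */
        ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, len);
        spi_bus_unlock(spi->controller);

        return ret;
}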
@@ -1305,7 +1314,7 @@ int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
buf += chunk;
len -= chunk;
- ret = spi_sync(spi, &m);
+ ret = spi_sync_locked(spi, &m);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 3fd6c733ff4e..14201f73aab1 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -27,6 +27,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -223,7 +224,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
- strlcpy(dsi->name, info->type, sizeof(dsi->name));
+ strscpy(dsi->name, info->type, sizeof(dsi->name));
ret = mipi_dsi_device_add(dsi);
if (ret) {
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ba1608effc0f..ac0d2ce3f870 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -147,8 +147,10 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
obj = NULL;
if (obj && drm_mode_object_lease_required(obj->type) &&
- !_drm_lease_held(file_priv, obj->id))
+ !_drm_lease_held(file_priv, obj->id)) {
+ drm_dbg_kms(dev, "[OBJECT:%d] not included in lease", id);
obj = NULL;
+ }
if (obj && obj->free_cb) {
if (!kref_get_unless_zero(&obj->refcount))
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index f634371c717a..e814020bbcd3 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -58,6 +58,8 @@ void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
+ INIT_LIST_HEAD(&panel->followers);
+ mutex_init(&panel->follower_lock);
panel->dev = dev;
panel->funcs = funcs;
panel->connector_type = connector_type;
@@ -105,13 +107,38 @@ EXPORT_SYMBOL(drm_panel_remove);
*/
int drm_panel_prepare(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
+ int ret;
+
if (!panel)
return -EINVAL;
- if (panel->funcs && panel->funcs->prepare)
- return panel->funcs->prepare(panel);
+ if (panel->prepared) {
+ dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
+ return 0;
+ }
+
+ mutex_lock(&panel->follower_lock);
- return 0;
+ if (panel->funcs && panel->funcs->prepare) {
+ ret = panel->funcs->prepare(panel);
+ if (ret < 0)
+ goto exit;
+ }
+ panel->prepared = true;
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ ret = follower->funcs->panel_prepared(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_prepared, ret);
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&panel->follower_lock);
+
+ return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -128,13 +155,38 @@ EXPORT_SYMBOL(drm_panel_prepare);
*/
int drm_panel_unprepare(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
+ int ret;
+
if (!panel)
return -EINVAL;
- if (panel->funcs && panel->funcs->unprepare)
- return panel->funcs->unprepare(panel);
+ if (!panel->prepared) {
+ dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
+ return 0;
+ }
- return 0;
+ mutex_lock(&panel->follower_lock);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ ret = follower->funcs->panel_unpreparing(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_unpreparing, ret);
+ }
+
+ if (panel->funcs && panel->funcs->unprepare) {
+ ret = panel->funcs->unprepare(panel);
+ if (ret < 0)
+ goto exit;
+ }
+ panel->prepared = false;
+
+ ret = 0;
+exit:
+ mutex_unlock(&panel->follower_lock);
+
+ return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -155,11 +207,17 @@ int drm_panel_enable(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ if (panel->enabled) {
+ dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
+ return 0;
+ }
+
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
return ret;
}
+ panel->enabled = true;
ret = backlight_enable(panel->backlight);
if (ret < 0)
@@ -187,13 +245,22 @@ int drm_panel_disable(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ if (!panel->enabled) {
+ dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
+ return 0;
+ }
+
ret = backlight_disable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
ret);
- if (panel->funcs && panel->funcs->disable)
- return panel->funcs->disable(panel);
+ if (panel->funcs && panel->funcs->disable) {
+ ret = panel->funcs->disable(panel);
+ if (ret < 0)
+ return ret;
+ }
+ panel->enabled = false;
return 0;
}
@@ -305,6 +372,141 @@ int of_drm_get_panel_orientation(const struct device_node *np,
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
+/**
+ * drm_is_panel_follower() - Check if the device is a panel follower
+ * @dev: The 'struct device' to check
+ *
+ * This checks to see if a device needs to be power sequenced together with
+ * a panel using the panel follower API.
+ * At the moment panels can only be followed on device tree enabled systems.
+ * The "panel" property of the follower points to the panel to be followed.
+ *
+ * Return: true if we should be power sequenced with a panel; false otherwise.
+ */
+bool drm_is_panel_follower(struct device *dev)
+{
+ /*
+ * The "panel" property is actually a phandle, but for simplicity we
+ * don't bother trying to parse it here. We just need to know if the
+ * property is there.
+ */
+ return of_property_read_bool(dev->of_node, "panel");
+}
+EXPORT_SYMBOL(drm_is_panel_follower);
+
+/**
+ * drm_panel_add_follower() - Register something to follow panel state.
+ * @follower_dev: The 'struct device' for the follower.
+ * @follower: The panel follower descriptor for the follower.
+ *
+ * A panel follower is called right after preparing the panel and right before
+ * unpreparing the panel. Its primary intention is to power on an associated
+ * touchscreen, though it could be used for any similar devices. Multiple
+ * devices are allowed to follow the same panel.
+ *
+ * If a follower is added to a panel that has already been prepared, the
+ * follower's panel_prepared callback is called right away.
+ *
+ * At the moment panels can only be followed on device tree enabled systems.
+ * The "panel" property of the follower points to the panel to be followed.
+ *
+ * Return: 0 or an error code. Note that -ENODEV means that we detected that
+ * follower_dev is not actually following a panel. The caller may
+ * choose to ignore this return value if following a panel is optional.
+ */
+int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ struct device_node *panel_np;
+ struct drm_panel *panel;
+ int ret;
+
+ panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
+ if (!panel_np)
+ return -ENODEV;
+
+ panel = of_drm_find_panel(panel_np);
+ of_node_put(panel_np);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ get_device(panel->dev);
+ follower->panel = panel;
+
+ mutex_lock(&panel->follower_lock);
+
+ list_add_tail(&follower->list, &panel->followers);
+ if (panel->prepared) {
+ ret = follower->funcs->panel_prepared(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_prepared, ret);
+ }
+
+ mutex_unlock(&panel->follower_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_panel_add_follower);
+
+/**
+ * drm_panel_remove_follower() - Reverse drm_panel_add_follower().
+ * @follower: The panel follower descriptor for the follower.
+ *
+ * Undo drm_panel_add_follower(). This includes calling the follower's
+ * panel_unpreparing callback if the follower is removed from a panel that is
+ * currently prepared.
+ */
+void drm_panel_remove_follower(struct drm_panel_follower *follower)
+{
+ struct drm_panel *panel = follower->panel;
+ int ret;
+
+ mutex_lock(&panel->follower_lock);
+
+ if (panel->prepared) {
+ ret = follower->funcs->panel_unpreparing(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_unpreparing, ret);
+ }
+ list_del_init(&follower->list);
+
+ mutex_unlock(&panel->follower_lock);
+
+ put_device(panel->dev);
+}
+EXPORT_SYMBOL(drm_panel_remove_follower);
+
+static void drm_panel_remove_follower_void(void *follower)
+{
+ drm_panel_remove_follower(follower);
+}
+
+/**
+ * devm_drm_panel_add_follower() - devm version of drm_panel_add_follower()
+ * @follower_dev: The 'struct device' for the follower.
+ * @follower: The panel follower descriptor for the follower.
+ *
+ * Handles calling drm_panel_remove_follower() using devm on the follower_dev.
+ *
+ * Return: 0 or an error code.
+ */
+int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ int ret;
+
+ ret = drm_panel_add_follower(follower_dev, follower);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(follower_dev,
+ drm_panel_remove_follower_void, follower);
+}
+EXPORT_SYMBOL(devm_drm_panel_add_follower);
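
/*
 * Illustrative sketch only, not part of this patch: a touchscreen driver
 * sequencing its power with the panel it sits on. struct my_ts and the
 * my_ts_power_*() helpers are hypothetical; drm_panel_follower_funcs is
 * assumed to be the callback struct declared alongside these helpers.
 */
static int my_ts_panel_prepared(struct drm_panel_follower *follower)
{
        struct my_ts *ts = container_of(follower, struct my_ts, follower);

        return my_ts_power_on(ts);
}

static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
{
        struct my_ts *ts = container_of(follower, struct my_ts, follower);

        return my_ts_power_off(ts);
}

static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
        .panel_prepared = my_ts_panel_prepared,
        .panel_unpreparing = my_ts_panel_unpreparing,
};

static int my_ts_probe(struct device *dev, struct my_ts *ts)
{
        /* Not described as a follower in DT: power up independently. */
        if (!drm_is_panel_follower(dev))
                return my_ts_power_on(ts);

        ts->follower.funcs = &my_ts_follower_funcs;
        return devm_drm_panel_add_follower(dev, &ts->follower);
}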
+
#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
/**
* drm_panel_of_backlight - use backlight device node for backlight
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index b1a38e6ce2f8..0cb646cb04ee 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -179,7 +179,7 @@ static const struct dmi_system_id orientation_data[] = {
}, { /* AYA NEO AIR */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AIR"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO NEXT */
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index c91e454eba09..5e95089676ff 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -40,8 +40,8 @@
/**
* DOC: overview
*
- * This helper library has two parts. The first part has support to implement
- * primary plane support on top of the normal CRTC configuration interface.
+ * This helper library contains helpers to implement primary plane support on
+ * top of the normal CRTC configuration interface.
* Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
* plane together with the CRTC state this does not allow userspace to disable
* the primary plane itself. The default primary plane only expose XRBG8888 and
@@ -51,14 +51,6 @@
* planes, and newly merged drivers must not rely upon these transitional
* helpers.
*
- * The second part also implements transitional helpers which allow drivers to
- * gradually switch to the atomic helper infrastructure for plane updates. Once
- * that switch is complete drivers shouldn't use these any longer, instead using
- * the proper legacy implementations for update and disable plane hooks provided
- * by the atomic helpers.
- *
- * Again drivers are strongly urged to switch to the new interfaces.
- *
* The plane helpers share the function table structures with other helpers,
* specifically also the atomic helpers. See &struct drm_plane_helper_funcs for
* the details.
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d29dafce9bb0..63b709a67471 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -51,15 +51,10 @@ MODULE_IMPORT_NS(DMA_BUF);
* between applications, they can't be guessed like the globally unique GEM
* names.
*
- * Drivers that support the PRIME API implement the
- * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
- * GEM based drivers must use drm_gem_prime_handle_to_fd() and
- * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
- * actual driver interfaces is provided through the &drm_gem_object_funcs.export
- * and &drm_driver.gem_prime_import hooks.
- *
- * &dma_buf_ops implementations for GEM drivers are all individually exported
- * for drivers which need to overwrite or reimplement some of them.
+ * Drivers that support the PRIME API implement the drm_gem_object_funcs.export
+ * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
+ * drivers are all individually exported for drivers which need to overwrite
+ * or reimplement some of them.
*
* Reference Counting for GEM Drivers
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -283,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
-/**
+/*
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
@@ -297,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
*
* Returns 0 on success or a negative error code on failure.
*/
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle)
+static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -365,18 +360,18 @@ out_put:
dma_buf_put(dma_buf);
return ret;
}
-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_prime_handle *args = data;
- if (!dev->driver->prime_fd_to_handle)
- return -ENOSYS;
+ if (dev->driver->prime_fd_to_handle) {
+ return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
+ &args->handle);
+ }
- return dev->driver->prime_fd_to_handle(dev, file_priv,
- args->fd, &args->handle);
+ return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}
static struct dma_buf *export_and_register_object(struct drm_device *dev,
@@ -413,7 +408,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
-/**
+/*
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
@@ -426,10 +421,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags,
- int *prime_fd)
+static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -511,22 +506,23 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_prime_handle *args = data;
- if (!dev->driver->prime_handle_to_fd)
- return -ENOSYS;
-
/* check flags are valid */
if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
return -EINVAL;
- return dev->driver->prime_handle_to_fd(dev, file_priv,
- args->handle, args->flags, &args->fd);
+ if (dev->driver->prime_handle_to_fd) {
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, args->flags,
+ &args->fd);
+ }
+ return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
+ args->flags, &args->fd);
}
/**
@@ -715,8 +711,6 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
* the same codepath that is used for regular GEM buffer mapping on the DRM fd.
* The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
* called to set up the mapping.
- *
- * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
*/
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
@@ -772,25 +766,15 @@ EXPORT_SYMBOL(drm_gem_prime_mmap);
* @vma: virtual address range
*
* Provides memory mapping for the buffer. This can be used as the
- * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
- * which should be set to drm_gem_prime_mmap().
- *
- * FIXME: There's really no point to this wrapper, drivers which need anything
- * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
+ * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
-
- dma_resv_assert_held(dma_buf->resv);
-
- if (!dev->driver->gem_prime_mmap)
- return -ENOSYS;
- return dev->driver->gem_prime_mmap(obj, vma);
+ return drm_gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
@@ -880,9 +864,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
- * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
- * using the PRIME helpers. It is used as the default in
- * drm_gem_prime_handle_to_fd().
+ * This is the implementation of the &drm_gem_object_funcs.export functions
+ * for GEM drivers using the PRIME helpers. It is used as the default for
+ * drivers that do not set their own.
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
@@ -978,10 +962,9 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
- * This is the implementation of the gem_prime_import functions for GEM drivers
- * using the PRIME helpers. Drivers can use this as their
- * &drm_driver.gem_prime_import implementation. It is used as the default
- * implementation in drm_gem_prime_fd_to_handle().
+ * This is the implementation of the gem_prime_import functions for GEM
+ * drivers using the PRIME helpers. It is the default for drivers that do
+ * not set their own &drm_driver.gem_prime_import.
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0c2be8360525..f7003d1ec5ef 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -136,6 +136,10 @@
* requirement is inherited from the wait-before-signal behavior required by
* the Vulkan timeline semaphore API.
*
+ * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
+ * blocking: an eventfd will be signaled when the syncobj is. This is useful to
+ * integrate the wait into an event loop.
+ *
*
* Import/export of syncobjs
* -------------------------
@@ -185,6 +189,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
+#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
@@ -212,6 +217,20 @@ struct syncobj_wait_entry {
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait);
+struct syncobj_eventfd_entry {
+ struct list_head node;
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+ struct drm_syncobj *syncobj;
+ struct eventfd_ctx *ev_fd_ctx;
+ u64 point;
+ u32 flags;
+};
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+ struct syncobj_eventfd_entry *entry);
+
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
@@ -274,6 +293,28 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
spin_unlock(&syncobj->lock);
}
+static void
+syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
+{
+ eventfd_ctx_put(entry->ev_fd_ctx);
+ dma_fence_put(entry->fence);
+ /* This happens either inside the syncobj lock, or after the node has
+ * already been removed from the list.
+ */
+ list_del(&entry->node);
+ kfree(entry);
+}
+
+static void
+drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
+ struct syncobj_eventfd_entry *entry)
+{
+ spin_lock(&syncobj->lock);
+ list_add_tail(&entry->node, &syncobj->ev_fd_list);
+ syncobj_eventfd_entry_func(syncobj, entry);
+ spin_unlock(&syncobj->lock);
+}
+
/**
* drm_syncobj_add_point - add new timeline point to the syncobj
* @syncobj: sync object to add timeline point do
@@ -288,7 +329,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
struct dma_fence *fence,
uint64_t point)
{
- struct syncobj_wait_entry *cur, *tmp;
+ struct syncobj_wait_entry *wait_cur, *wait_tmp;
+ struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
struct dma_fence *prev;
dma_fence_get(fence);
@@ -302,8 +344,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
dma_fence_chain_init(chain, prev, fence, point);
rcu_assign_pointer(syncobj->fence, &chain->base);
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
- syncobj_wait_syncobj_func(syncobj, cur);
+ list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, wait_cur);
+ list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+ syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
spin_unlock(&syncobj->lock);
/* Walk the chain once to trigger garbage collection */
@@ -323,7 +367,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
- struct syncobj_wait_entry *cur, *tmp;
+ struct syncobj_wait_entry *wait_cur, *wait_tmp;
+ struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
if (fence)
dma_fence_get(fence);
@@ -335,8 +380,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
- syncobj_wait_syncobj_func(syncobj, cur);
+ list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, wait_cur);
+ list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+ syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
}
spin_unlock(&syncobj->lock);
@@ -353,10 +400,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
*/
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- struct dma_fence *fence = dma_fence_allocate_private_stub();
+ struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
- if (IS_ERR(fence))
- return PTR_ERR(fence);
+ if (!fence)
+ return -ENOMEM;
drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
@@ -472,7 +519,13 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
+ struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
+
drm_syncobj_replace_fence(syncobj, NULL);
+
+ list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+ syncobj_eventfd_entry_free(ev_fd_cur);
+
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
@@ -501,6 +554,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
kref_init(&syncobj->refcount);
INIT_LIST_HEAD(&syncobj->cb_list);
+ INIT_LIST_HEAD(&syncobj->ev_fd_list);
spin_lock_init(&syncobj->lock);
if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
@@ -1304,6 +1358,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
return ret;
}
+static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct syncobj_eventfd_entry *entry =
+ container_of(cb, struct syncobj_eventfd_entry, fence_cb);
+
+ eventfd_signal(entry->ev_fd_ctx, 1);
+ syncobj_eventfd_entry_free(entry);
+}
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+ struct syncobj_eventfd_entry *entry)
+{
+ int ret;
+ struct dma_fence *fence;
+
+ /* This happens inside the syncobj lock */
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ ret = dma_fence_chain_find_seqno(&fence, entry->point);
+ if (ret != 0 || !fence) {
+ dma_fence_put(fence);
+ return;
+ }
+
+ list_del_init(&entry->node);
+ entry->fence = fence;
+
+ if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
+ eventfd_signal(entry->ev_fd_ctx, 1);
+ syncobj_eventfd_entry_free(entry);
+ } else {
+ ret = dma_fence_add_callback(fence, &entry->fence_cb,
+ syncobj_eventfd_entry_fence_func);
+ if (ret == -ENOENT) {
+ eventfd_signal(entry->ev_fd_ctx, 1);
+ syncobj_eventfd_entry_free(entry);
+ }
+ }
+}
+
+int
+drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_eventfd *args = data;
+ struct drm_syncobj *syncobj;
+ struct eventfd_ctx *ev_fd_ctx;
+ struct syncobj_eventfd_entry *entry;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
+ return -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ syncobj = drm_syncobj_find(file_private, args->handle);
+ if (!syncobj)
+ return -ENOENT;
+
+ ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(ev_fd_ctx))
+ return PTR_ERR(ev_fd_ctx);
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ eventfd_ctx_put(ev_fd_ctx);
+ return -ENOMEM;
+ }
+ entry->syncobj = syncobj;
+ entry->ev_fd_ctx = ev_fd_ctx;
+ entry->point = args->point;
+ entry->flags = args->flags;
+
+ drm_syncobj_add_eventfd(syncobj, entry);
+ drm_syncobj_put(syncobj);
+
+ return 0;
+}
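
/*
 * Illustrative userspace sketch only, not part of this patch: arming an
 * eventfd on a syncobj point and consuming the notification from poll().
 * Error handling is omitted; drm_fd and handle are assumed to exist, and the
 * uapi header providing DRM_IOCTL_SYNCOBJ_EVENTFD is assumed to be installed.
 */
#include <stdint.h>
#include <unistd.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <xf86drm.h>

static int my_wait_point_via_eventfd(int drm_fd, uint32_t handle, uint64_t point)
{
        struct drm_syncobj_eventfd args = {
                .handle = handle,
                .flags = 0,     /* or DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE */
                .fd = eventfd(0, EFD_CLOEXEC),
                .point = point,
        };
        struct pollfd pfd = { .events = POLLIN };
        uint64_t count;

        drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);

        pfd.fd = args.fd;
        poll(&pfd, 1, -1);                      /* or any event loop */
        read(args.fd, &count, sizeof(count));   /* consume the signal */
        close(args.fd);

        return 0;
}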
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index f62767ff34b2..b169b3e44a92 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -487,17 +487,17 @@ void drm_sysfs_connector_hotplug_event(struct drm_connector *connector)
EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event);
/**
- * drm_sysfs_connector_status_event - generate a DRM uevent for connector
- * property status change
- * @connector: connector on which property status changed
- * @property: connector property whose status changed.
+ * drm_sysfs_connector_property_event - generate a DRM uevent for connector
+ * property change
+ * @connector: connector on which property changed
+ * @property: connector property which has changed.
*
- * Send a uevent for the DRM device specified by @dev. Currently we
+ * Send a uevent for the specified DRM connector and property. Currently we
* set HOTPLUG=1 and connector id along with the attached property id
- * related to the status change.
+ * related to the change.
*/
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
- struct drm_property *property)
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+ struct drm_property *property)
{
struct drm_device *dev = connector->dev;
char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21];
@@ -511,11 +511,14 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
snprintf(prop_id, ARRAY_SIZE(prop_id),
"PROPERTY=%u", property->base.id);
- DRM_DEBUG("generating connector status event\n");
+ drm_dbg_kms(connector->dev,
+ "[CONNECTOR:%d:%s] generating connector property event for [PROP:%d:%s]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
-EXPORT_SYMBOL(drm_sysfs_connector_status_event);
+EXPORT_SYMBOL(drm_sysfs_connector_property_event);
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cf741c5c82d2..384df1659be6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -53,11 +53,12 @@ static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
OUT(buffer, VIV_FE_END_HEADER_OP_END);
}
-static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer,
+ unsigned int waitcycles)
{
buffer->user_size = ALIGN(buffer->user_size, 8);
- OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
+ OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | waitcycles);
}
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
@@ -168,7 +169,7 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
/* initialize buffer */
buffer->user_size = 0;
- CMD_WAIT(buffer);
+ CMD_WAIT(buffer, gpu->fe_waitcycles);
CMD_LINK(buffer, 2,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
@@ -320,7 +321,7 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
CMD_END(buffer);
/* Append waitlink */
- CMD_WAIT(buffer);
+ CMD_WAIT(buffer, gpu->fe_waitcycles);
CMD_LINK(buffer, 2,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
@@ -503,7 +504,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
- CMD_WAIT(buffer);
+ CMD_WAIT(buffer, gpu->fe_waitcycles);
CMD_LINK(buffer, 2,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 9dc20d892c15..721d633aece9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -121,6 +121,9 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
SUBALLOC_GRANULE);
+ if (!suballoc)
+ return;
+
mutex_lock(&suballoc->lock);
bitmap_release_region(suballoc->granule_map,
cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 31a7f59ccb49..a8d3fa81e4ec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -6,7 +6,9 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <drm/drm_debugfs.h>
@@ -481,10 +483,7 @@ static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 44b5f3c35aab..898f84a0fc30 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -130,9 +130,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
return;
etnaviv_dump_core = false;
- mutex_lock(&gpu->mmu_context->lock);
+ mutex_lock(&submit->mmu_context->lock);
- mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
+ mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
n_obj = 5;
@@ -162,7 +162,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
__GFP_NORETRY);
if (!iter.start) {
- mutex_unlock(&gpu->mmu_context->lock);
+ mutex_unlock(&submit->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -174,18 +174,18 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
memset(iter.hdr, 0, iter.data - iter.start);
etnaviv_core_dump_registers(&iter, gpu);
- etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
+ etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer,
- &gpu->mmu_context->cmdbuf_mapping));
+ &submit->mmu_context->cmdbuf_mapping));
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf,
- &gpu->mmu_context->cmdbuf_mapping));
+ &submit->mmu_context->cmdbuf_mapping));
- mutex_unlock(&gpu->mmu_context->lock);
+ mutex_unlock(&submit->mmu_context->lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index baa81cbf701a..a42d260cac2c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -97,7 +97,6 @@ struct etnaviv_gem_submit {
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;
struct pid *pid; /* submitting process */
- bool runtime_resumed;
u32 exec_state;
u32 flags;
unsigned int nr_pmrs;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 45403ea38906..2416c526f9b0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -362,9 +362,6 @@ static void submit_cleanup(struct kref *kref)
container_of(kref, struct etnaviv_gem_submit, refcount);
unsigned i;
- if (submit->runtime_resumed)
- pm_runtime_put_autosuspend(submit->gpu->dev);
-
if (submit->cmdbuf.suballoc)
etnaviv_cmdbuf_free(&submit->cmdbuf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index de8c9894967c..9276756e1397 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -8,8 +8,8 @@
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -493,6 +493,14 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
etnaviv_gpu_load_clock(gpu, clock);
}
+
+ /*
+ * Choose number of wait cycles to target a ~30us (1/32768) max latency
+ * until new work is picked up by the FE when it polls in the idle loop.
+ * If the GPU base frequency is unknown use 200 wait cycles.
+ */
+ gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
+ 200UL, 0xffffUL);
}
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
@@ -576,7 +584,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
/* We rely on the GPU running, so program the clock */
etnaviv_gpu_update_clock(gpu);
- gpu->fe_running = false;
+ gpu->state = ETNA_GPU_STATE_RESET;
gpu->exec_state = -1;
if (gpu->mmu_context)
etnaviv_iommu_context_put(gpu->mmu_context);
@@ -651,8 +659,6 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
}
-
- gpu->fe_running = true;
}
static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
@@ -661,6 +667,8 @@ static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
u16 prefetch;
u32 address;
+ WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);
+
/* setup the MMU */
etnaviv_iommu_restore(gpu, context);
@@ -670,6 +678,8 @@ static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
&gpu->mmu_context->cmdbuf_mapping);
etnaviv_gpu_start_fe(gpu, address, prefetch);
+
+ gpu->state = ETNA_GPU_STATE_RUNNING;
}
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
@@ -705,6 +715,9 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
+ WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
+ gpu->state == ETNA_GPU_STATE_RESET));
+
if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
@@ -751,6 +764,8 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
etnaviv_gpu_setup_pulse_eater(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+
+ gpu->state = ETNA_GPU_STATE_INITIALIZED;
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
@@ -793,6 +808,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
(gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
gpu->sec_mode = ETNA_SEC_KERNEL;
+ gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+
ret = etnaviv_hw_reset(gpu);
if (ret) {
dev_err(gpu->dev, "GPU reset failed\n");
@@ -859,8 +876,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
- gpu->initialized = true;
-
return 0;
fail:
@@ -1059,50 +1074,6 @@ pm_put:
}
#endif
-void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
-{
- struct etnaviv_gpu *gpu = submit->gpu;
- char *comm = NULL, *cmd = NULL;
- struct task_struct *task;
- unsigned int i;
-
- dev_err(gpu->dev, "recover hung GPU!\n");
-
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
-
- if (comm && cmd)
- dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
-
- kfree(cmd);
- kfree(comm);
-
- if (pm_runtime_get_sync(gpu->dev) < 0)
- goto pm_put;
-
- mutex_lock(&gpu->lock);
-
- etnaviv_hw_reset(gpu);
-
- /* complete all events, the GPU won't do it after the reset */
- spin_lock(&gpu->event_spinlock);
- for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
- complete(&gpu->event_free);
- bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
- spin_unlock(&gpu->event_spinlock);
-
- etnaviv_gpu_hw_init(gpu);
-
- mutex_unlock(&gpu->lock);
- pm_runtime_mark_last_busy(gpu->dev);
-pm_put:
- pm_runtime_put_autosuspend(gpu->dev);
-}
-
/* fence object management */
struct etnaviv_fence {
struct etnaviv_gpu *gpu;
@@ -1183,20 +1154,22 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
unsigned int *events)
{
unsigned long timeout = msecs_to_jiffies(10 * 10000);
- unsigned i, acquired = 0;
+ unsigned i, acquired = 0, rpm_count = 0;
+ int ret;
for (i = 0; i < nr_events; i++) {
- unsigned long ret;
+ unsigned long remaining;
- ret = wait_for_completion_timeout(&gpu->event_free, timeout);
+ remaining = wait_for_completion_timeout(&gpu->event_free, timeout);
- if (!ret) {
+ if (!remaining) {
dev_err(gpu->dev, "wait_for_completion_timeout failed");
+ ret = -EBUSY;
goto out;
}
acquired++;
- timeout = ret;
+ timeout = remaining;
}
spin_lock(&gpu->event_spinlock);
@@ -1211,13 +1184,23 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
spin_unlock(&gpu->event_spinlock);
+ for (i = 0; i < nr_events; i++) {
+ ret = pm_runtime_resume_and_get(gpu->dev);
+ if (ret)
+ goto out_rpm;
+ rpm_count++;
+ }
+
return 0;
+out_rpm:
+ for (i = 0; i < rpm_count; i++)
+ pm_runtime_put_autosuspend(gpu->dev);
out:
for (i = 0; i < acquired; i++)
complete(&gpu->event_free);
- return -EBUSY;
+ return ret;
}
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1229,6 +1212,8 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
clear_bit(event, gpu->event_bitmap);
complete(&gpu->event_free);
}
+
+ pm_runtime_put_autosuspend(gpu->dev);
}
/*
@@ -1371,15 +1356,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
unsigned int i, nr_events = 1, event[3];
int ret;
- if (!submit->runtime_resumed) {
- ret = pm_runtime_get_sync(gpu->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(gpu->dev);
- return NULL;
- }
- submit->runtime_resumed = true;
- }
-
/*
* if there are performance monitor requests we need to have
* - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
@@ -1407,7 +1383,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- if (!gpu->fe_running)
+ if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
if (submit->prev_mmu_context)
@@ -1454,6 +1430,49 @@ static void sync_point_worker(struct work_struct *work)
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
+void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
+{
+ struct etnaviv_gpu *gpu = submit->gpu;
+ char *comm = NULL, *cmd = NULL;
+ struct task_struct *task;
+ unsigned int i;
+
+ dev_err(gpu->dev, "recover hung GPU!\n");
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ comm = kstrdup(task->comm, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ put_task_struct(task);
+ }
+
+ if (comm && cmd)
+ dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
+
+ kfree(cmd);
+ kfree(comm);
+
+ if (pm_runtime_get_sync(gpu->dev) < 0)
+ goto pm_put;
+
+ mutex_lock(&gpu->lock);
+
+ etnaviv_hw_reset(gpu);
+
+ /* complete all events, the GPU won't do it after the reset */
+ spin_lock(&gpu->event_spinlock);
+ for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+ event_free(gpu, i);
+ spin_unlock(&gpu->event_spinlock);
+
+ etnaviv_gpu_hw_init(gpu);
+
+ mutex_unlock(&gpu->lock);
+ pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+}
+
static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
static const char *fault_reasons[] = {
@@ -1520,6 +1539,8 @@ static irqreturn_t irq_handler(int irq, void *data)
if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
dump_mmu_fault(gpu);
+ gpu->state = ETNA_GPU_STATE_FAULT;
+ drm_sched_fault(&gpu->sched);
intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
}
@@ -1628,9 +1649,9 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
} while (1);
}
-static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->initialized && gpu->fe_running) {
+ if (gpu->state == ETNA_GPU_STATE_RUNNING) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1643,12 +1664,10 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
*/
etnaviv_gpu_wait_idle(gpu, 100);
- gpu->fe_running = false;
+ gpu->state = ETNA_GPU_STATE_INITIALIZED;
}
gpu->exec_state = -1;
-
- return etnaviv_gpu_clk_disable(gpu);
}
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
@@ -1733,13 +1752,11 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
if (ret)
goto out_workqueue;
- if (IS_ENABLED(CONFIG_PM))
- ret = pm_runtime_get_sync(gpu->dev);
- else
+ if (!IS_ENABLED(CONFIG_PM)) {
ret = etnaviv_gpu_clk_enable(gpu);
- if (ret < 0)
- goto out_sched;
-
+ if (ret < 0)
+ goto out_sched;
+ }
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
@@ -1751,9 +1768,6 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
priv->gpu[priv->num_gpus++] = gpu;
- pm_runtime_mark_last_busy(gpu->dev);
- pm_runtime_put_autosuspend(gpu->dev);
-
return 0;
out_sched:
@@ -1785,16 +1799,14 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
pm_runtime_put_sync_suspend(gpu->dev);
} else {
etnaviv_gpu_hw_suspend(gpu);
+ etnaviv_gpu_clk_disable(gpu);
}
if (gpu->mmu_context)
etnaviv_iommu_context_put(gpu->mmu_context);
- if (gpu->initialized) {
- etnaviv_cmdbuf_free(&gpu->buffer);
- etnaviv_iommu_global_fini(gpu);
- gpu->initialized = false;
- }
+ etnaviv_cmdbuf_free(&gpu->buffer);
+ etnaviv_iommu_global_fini(gpu);
gpu->drm = NULL;
xa_destroy(&gpu->user_fences);
@@ -1918,7 +1930,11 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
return -EBUSY;
}
- return etnaviv_gpu_hw_suspend(gpu);
+ etnaviv_gpu_hw_suspend(gpu);
+
+ gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+
+ return etnaviv_gpu_clk_disable(gpu);
}
static int etnaviv_gpu_rpm_resume(struct device *dev)
@@ -1931,7 +1947,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return ret;
/* Re-initialise the basic hardware state */
- if (gpu->drm && gpu->initialized) {
+ if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);
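The recovery path added above completes every still-allocated event slot after the reset, because the hardware will never signal them once it has been reset. A minimal sketch of that drain pattern, using placeholder names rather than the driver's own types:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_NR_EVENTS 30	/* mirrors ETNA_NR_EVENTS; assumed for this sketch */

struct demo_gpu {
	spinlock_t event_spinlock;
	DECLARE_BITMAP(event_bitmap, DEMO_NR_EVENTS);
};

static void demo_event_free(struct demo_gpu *gpu, unsigned int event)
{
	clear_bit(event, gpu->event_bitmap);
}

/* Release every in-flight event slot under the lock that protects the bitmap. */
static void demo_complete_all_events(struct demo_gpu *gpu)
{
	unsigned int i;

	spin_lock(&gpu->event_spinlock);
	for_each_set_bit(i, gpu->event_bitmap, DEMO_NR_EVENTS)
		demo_event_free(gpu, i);
	spin_unlock(&gpu->event_spinlock);
}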
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 98c6f9c320fc..197e0037732e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -95,6 +95,15 @@ struct clk;
#define ETNA_NR_EVENTS 30
+enum etnaviv_gpu_state {
+ ETNA_GPU_STATE_UNKNOWN = 0,
+ ETNA_GPU_STATE_IDENTIFIED,
+ ETNA_GPU_STATE_RESET,
+ ETNA_GPU_STATE_INITIALIZED,
+ ETNA_GPU_STATE_RUNNING,
+ ETNA_GPU_STATE_FAULT,
+};
+
struct etnaviv_gpu {
struct drm_device *drm;
struct thermal_cooling_device *cooling;
@@ -105,8 +114,7 @@ struct etnaviv_gpu {
struct workqueue_struct *wq;
struct mutex sched_lock;
struct drm_gpu_scheduler sched;
- bool initialized;
- bool fe_running;
+ enum etnaviv_gpu_state state;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
@@ -150,6 +158,7 @@ struct etnaviv_gpu {
struct clk *clk_shader;
unsigned int freq_scale;
+ unsigned int fe_waitcycles;
unsigned long base_rate_core;
unsigned long base_rate_shader;
};
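The header hunk above replaces the two independent booleans with a single state enum, so each transition can be written as an explicit precondition. A hedged sketch of how such a gate reads, with placeholder helpers rather than the driver's functions:

#include <linux/types.h>

enum demo_gpu_state {
	DEMO_GPU_STATE_UNKNOWN = 0,
	DEMO_GPU_STATE_IDENTIFIED,
	DEMO_GPU_STATE_RESET,
	DEMO_GPU_STATE_INITIALIZED,
	DEMO_GPU_STATE_RUNNING,
	DEMO_GPU_STATE_FAULT,
};

struct demo_gpu {
	enum demo_gpu_state state;
};

/* Start the front end only once hardware init has completed. */
static bool demo_can_start_fe(const struct demo_gpu *gpu)
{
	return gpu->state == DEMO_GPU_STATE_INITIALIZED;
}

/* Replace the final WAIT with END only if the front end is actually running. */
static bool demo_can_suspend(const struct demo_gpu *gpu)
{
	return gpu->state == DEMO_GPU_STATE_RUNNING;
}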
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 2e63afa6c798..67201242438b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -39,6 +39,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.minor_features11 = 0x0,
},
{
+ .model = 0x520,
+ .revision = 0x5341,
+ .product_id = 0x5202,
+ .customer_id = 0x204,
+ .eco_id = 0,
+ .stream_count = 1,
+ .register_max = 64,
+ .thread_count = 256,
+ .shader_core_count = 1,
+ .vertex_cache_size = 8,
+ .vertex_output_buffer_size = 512,
+ .pixel_pipes = 1,
+ .instruction_count = 256,
+ .num_constants = 168,
+ .buffer_size = 0,
+ .varyings_count = 8,
+ .features = 0xe02c7eca,
+ .minor_features0 = 0xe9399eff,
+ .minor_features1 = 0xfe1fb2db,
+ .minor_features2 = 0xcedf0080,
+ .minor_features3 = 0x10800005,
+ .minor_features4 = 0x20000000,
+ .minor_features5 = 0x00020880,
+ .minor_features6 = 0x00000000,
+ .minor_features7 = 0x00001000,
+ .minor_features8 = 0x00000000,
+ .minor_features9 = 0x00000000,
+ .minor_features10 = 0x00000000,
+ .minor_features11 = 0x00000000,
+ },
+ {
.model = 0x7000,
.revision = 0x6202,
.product_id = 0x70003,
@@ -197,6 +228,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.minor_features10 = 0x108048c0,
.minor_features11 = 0x00000010,
},
+ {
+ .model = 0x8000,
+ .revision = 0x8002,
+ .product_id = 0x5080009,
+ .customer_id = 0x9f,
+ .eco_id = 0x6000000,
+ .stream_count = 8,
+ .register_max = 64,
+ .thread_count = 256,
+ .shader_core_count = 1,
+ .nn_core_count = 6,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cac,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfadb,
+ .minor_features2 = 0xeb9d6fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xd30dafc7,
+ .minor_features5 = 0x7b5ac333,
+ .minor_features6 = 0xfc8ee200,
+ .minor_features7 = 0x03fffa6f,
+ .minor_features8 = 0x00fe0ef0,
+ .minor_features9 = 0x0088003c,
+ .minor_features10 = 0x108048c0,
+ .minor_features11 = 0x00000010,
+ },
};
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 67bdce5326c6..4fa72567183a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -553,6 +553,9 @@ void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
struct etnaviv_drm_private *priv = gpu->drm->dev_private;
struct etnaviv_iommu_global *global = priv->mmu_global;
+ if (!global)
+ return;
+
if (--global->use > 0)
return;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 1ae87dfd19c4..345fec6cb1a4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -55,8 +55,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*/
dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
change = dma_addr - gpu->hangcheck_dma_addr;
- if (gpu->completed_fence != gpu->hangcheck_fence ||
- change < 0 || change > 16) {
+ if (gpu->state == ETNA_GPU_STATE_RUNNING &&
+ (gpu->completed_fence != gpu->hangcheck_fence ||
+ change < 0 || change > 16)) {
gpu->hangcheck_dma_addr = dma_addr;
gpu->hangcheck_fence = gpu->completed_fence;
goto out_no_timeout;
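The scheduler hunk above only treats a timed-out job as a real hang when the GPU is still in the RUNNING state and neither the fence counter nor the front-end DMA address has advanced beyond the small wait-loop window. A self-contained sketch of that heuristic, with placeholder fields standing in for the driver's state:

#include <linux/types.h>

struct demo_hang_state {
	u32 completed_fence;
	u32 hangcheck_fence;
	u32 dma_addr;
	u32 hangcheck_dma_addr;
	bool running;
};

static bool demo_job_made_progress(struct demo_hang_state *s)
{
	int change = s->dma_addr - s->hangcheck_dma_addr;

	if (!s->running)
		return false;	/* a faulted GPU cannot make progress */

	if (s->completed_fence != s->hangcheck_fence ||
	    change < 0 || change > 16) {
		/* Remember the new position and give the job more time. */
		s->hangcheck_dma_addr = s->dma_addr;
		s->hangcheck_fence = s->completed_fence;
		return true;
	}

	return false;
}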
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 7ca7e1dab52c..733b109a5095 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -7,7 +7,7 @@ config DRM_EXYNOS
select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
- select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
select SND_SOC_HDMI_CODEC if SND_SOC
help
Choose this option if you have a Samsung SoC Exynos chipset.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 2867b39fa35e..4d986077738b 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -12,7 +12,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3126f735dedc..0156a5e94435 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 4153f302de7c..d19e796c2061 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -39,13 +39,12 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
if (exynos_crtc->ops->atomic_disable)
exynos_crtc->ops->atomic_disable(exynos_crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event && !crtc->state->active) {
- spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
-
crtc->state->event = NULL;
}
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
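The CRTC hunk above widens the event_lock critical section so the check and the consumption of crtc->state->event happen atomically. The same pattern in isolation, shown only as a sketch of generic drm_crtc usage:

#include <linux/spinlock.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static void demo_send_disable_event(struct drm_crtc *crtc)
{
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event && !crtc->state->active) {
		/* Event pointer cannot change between the test and the send. */
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}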
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6b73fb7a83c3..8399256cb5c9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -109,11 +109,8 @@ static const struct drm_driver exynos_drm_driver = {
.open = exynos_drm_open,
.postclose = exynos_drm_postclose,
.dumb_create = exynos_drm_gem_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index fc81f728e6ba..69ea33cae651 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -8,7 +8,8 @@
*/
#include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <drm/bridge/samsung-dsim.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index fdf65587f1fe..a379c8ca435a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -49,9 +49,9 @@ static void exynos_drm_fb_destroy(struct fb_info *info)
static const struct fb_ops exynos_drm_fb_ops = {
.owner = THIS_MODULE,
- __FB_DEFAULT_IO_OPS_RDWR,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_IO_OPS_DRAW,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_mmap = exynos_drm_fb_mmap,
.fb_destroy = exynos_drm_fb_destroy,
};
@@ -79,6 +79,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
+ fbi->flags |= FBINFO_VIRTFB;
fbi->screen_buffer = exynos_gem->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -215,10 +216,6 @@ void exynos_drm_fbdev_setup(struct drm_device *dev)
if (ret)
goto err_drm_client_init;
- ret = exynos_drm_fbdev_client_hotplug(&fb_helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&fb_helper->client);
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 7f4a0be03dd1..8dde7b1e9b35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index ec784e58da5c..414e585ec7dd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
/* Let the runqueue know that there is work to do. */
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
- if (runqueue_node->async)
+ if (req->async)
goto out;
wait_for_completion(&runqueue_node->complete);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 74ea3c26dead..1a5ae781b56c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
return 0;
}
-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{ }
#endif
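The header fix above turns the compiled-out hooks into static inline definitions, the usual way to provide no-op stubs without multiple-definition errors when several translation units include the header. An illustrative sketch with a placeholder config symbol:

struct drm_device;
struct drm_file;

#ifdef CONFIG_DEMO_FEATURE
int demo_open(struct drm_device *drm_dev, struct drm_file *file);
void demo_close(struct drm_device *drm_dev, struct drm_file *file);
#else
static inline int demo_open(struct drm_device *drm_dev, struct drm_file *file)
{
	return 0;
}

static inline void demo_close(struct drm_device *drm_dev, struct drm_file *file)
{
}
#endif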
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 964dceb28c1e..34cdabc30b4f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1426,6 +1426,6 @@ struct platform_driver gsc_driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
.pm = &gsc_pm_ops,
- .of_match_table = of_match_ptr(exynos_drm_gsc_of_match),
+ .of_match_table = exynos_drm_gsc_of_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 8706f377c349..ffb327c5139e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 20608e9780ce..f2b8b09a6b4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4d56c8c799c5..f5e1adfcaa51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev)
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
kfree(ctx->raw_edid);
ctx->raw_edid = NULL;
-
- return -EINVAL;
}
component_del(&pdev->dev, &vidi_component_ops);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b7c11bdce2c8..f3aaa4ea3e68 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -21,8 +21,8 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 8d333db813b7..b302392ff0d7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index c09ba019ba5e..a395f93449f3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -346,7 +346,7 @@ disable_clk:
return ret;
}
-static int fsl_dcu_drm_remove(struct platform_device *pdev)
+static void fsl_dcu_drm_remove(struct platform_device *pdev)
{
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
@@ -354,13 +354,11 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
drm_dev_put(fsl_dev->drm);
clk_disable_unprepare(fsl_dev->clk);
clk_unregister(fsl_dev->pix_clk);
-
- return 0;
}
static struct platform_driver fsl_dcu_drm_platform_driver = {
.probe = fsl_dcu_drm_probe,
- .remove = fsl_dcu_drm_remove,
+ .remove_new = fsl_dcu_drm_remove,
.driver = {
.name = "fsl-dcu",
.pm = &fsl_dcu_drm_pm_ops,
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index cd3d92725ed4..efb4a2dd2f80 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -3,7 +3,7 @@ config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
- select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+ select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select I2C
select I2C_ALGOBIT
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 955cbe9f05a7..98b44974d42d 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -135,10 +135,10 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)
static const struct fb_ops psb_fbdev_fb_ops = {
.owner = THIS_MODULE,
- __FB_DEFAULT_IO_OPS_RDWR,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psb_fbdev_fb_setcolreg,
- __FB_DEFAULT_IO_OPS_DRAW,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_mmap = psb_fbdev_fb_mmap,
.fb_destroy = psb_fbdev_fb_destroy,
};
@@ -215,7 +215,7 @@ static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
}
info->fbops = &psb_fbdev_fb_ops;
- info->flags = FBINFO_DEFAULT;
+
/* Accessed stolen memory directly */
info->screen_base = dev_priv->vram_addr + backing->offset;
info->screen_size = size;
@@ -328,10 +328,6 @@ void psb_fbdev_setup(struct drm_psb_private *dev_priv)
goto err_drm_fb_helper_unprepare;
}
- ret = psb_fbdev_client_hotplug(&fb_helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&fb_helper->client);
return;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index dc16a92625d4..d2f199ea3c11 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -390,7 +390,7 @@ static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *
mutex_lock(&gdrm->damage_lock);
if (!gdrm->shadow_buf) {
- gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
+ gdrm->shadow_buf = vcalloc(fb->pitches[0], fb->height);
if (!gdrm->shadow_buf) {
mutex_unlock(&gdrm->damage_lock);
return -ENOMEM;
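The gud hunk above switches the shadow-buffer allocation to vcalloc(), whose two-argument form checks the pitch * height multiplication for overflow before allocating, unlike an open-coded multiply passed to vzalloc(). A minimal sketch with a placeholder function name:

#include <linux/types.h>
#include <linux/vmalloc.h>

/* Returns NULL on allocation failure or if pitch * height would overflow. */
static void *demo_alloc_shadow(size_t pitch, size_t height)
{
	return vcalloc(pitch, height);
}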
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 0c4aa4d9b0a7..8a98fa276e8a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -63,7 +63,6 @@ static const struct drm_driver hibmc_driver = {
.debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
- .gem_prime_mmap = drm_gem_prime_mmap,
};
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index d9978b79828c..566de4658719 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -874,14 +874,12 @@ static int dsi_probe(struct platform_device *pdev)
return 0;
}
-static int dsi_remove(struct platform_device *pdev)
+static void dsi_remove(struct platform_device *pdev)
{
struct dsi_data *data = platform_get_drvdata(pdev);
struct dw_dsi *dsi = &data->dsi;
mipi_dsi_host_unregister(&dsi->host);
-
- return 0;
}
static const struct of_device_id dsi_of_match[] = {
@@ -892,7 +890,7 @@ MODULE_DEVICE_TABLE(of, dsi_of_match);
static struct platform_driver dsi_driver = {
.probe = dsi_probe,
- .remove = dsi_remove,
+ .remove_new = dsi_remove,
.driver = {
.name = "dw-dsi",
.of_match_table = dsi_of_match,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 9c5d49bf40c9..e8c77bcc6dae 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -11,9 +11,9 @@
* Xinwei Kong <[email protected]>
*/
-#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -279,10 +279,9 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
return component_master_add_with_match(dev, &kirin_drm_ops, match);
}
-static int kirin_drm_platform_remove(struct platform_device *pdev)
+static void kirin_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &kirin_drm_ops);
- return 0;
}
static const struct of_device_id kirin_drm_dt_ids[] = {
@@ -295,7 +294,7 @@ MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
static struct platform_driver kirin_drm_platform_driver = {
.probe = kirin_drm_platform_probe,
- .remove = kirin_drm_platform_remove,
+ .remove_new = kirin_drm_platform_remove,
.driver = {
.name = "kirin-drm",
.of_match_table = kirin_drm_dt_ids,
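Several hunks above convert platform drivers from .remove to .remove_new, whose callback returns void because the platform core cannot act on an error at remove time anyway. A hedged, self-contained sketch of the converted shape; the driver name and callbacks are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* Tear down state here; errors are logged rather than returned. */
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo-drm",
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");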
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index a7d2c92d6c6a..8026118c6e03 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -7,6 +7,7 @@
#include <linux/hyperv.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/screen_info.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 521bdf656cca..131512a5f3bd 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -497,7 +497,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
- .probe_new = ch7006_probe,
+ .probe = ch7006_probe,
.remove = ch7006_remove,
.driver = {
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index f57f9a807542..ff23422727fc 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -420,7 +420,7 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
- .probe_new = sil164_probe,
+ .probe = sil164_probe,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 40bcd9067e69..d8d7de18dd65 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1951,7 +1951,7 @@ static int tda998x_create(struct device *dev)
* offset.
*/
memset(&cec_info, 0, sizeof(cec_info));
- strlcpy(cec_info.type, "tda9950", sizeof(cec_info.type));
+ strscpy(cec_info.type, "tda9950", sizeof(cec_info.type));
cec_info.addr = priv->cec_addr;
cec_info.platform_data = &priv->cec_glue;
cec_info.irq = client->irq;
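The tda998x hunk above replaces strlcpy() with strscpy(), which always NUL-terminates and reports truncation as -E2BIG instead of returning the full source length. A small sketch of how a caller can check for truncation:

#include <linux/errno.h>
#include <linux/string.h>

static int demo_set_type(char *dst, size_t dst_size, const char *name)
{
	ssize_t ret = strscpy(dst, name, dst_size);

	return ret < 0 ? -E2BIG : 0;
}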
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 01b5a8272a27..ce397a8797f7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -17,7 +17,7 @@ config DRM_I915
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_MIPI_DSI
- select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+ select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select RELAY
select I2C
select I2C_ALGOBIT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c9ed4c52760..79f65eff6bb2 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,10 +19,15 @@ subdir-ccflags-y += -Wno-type-limits
subdir-ccflags-y += -Wno-missing-field-initializers
subdir-ccflags-y += -Wno-sign-compare
subdir-ccflags-y += -Wno-shift-negative-value
-subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
+# Fine grained warnings disable
+CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
+CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
+CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
+
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
@@ -127,6 +132,7 @@ gt-y += \
gt/intel_sseu.o \
gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
+ gt/intel_tlb.o \
gt/intel_wopcm.o \
gt/intel_workarounds.o \
gt/shmem_utils.o \
@@ -192,7 +198,8 @@ i915-y += \
gt/uc/intel_gsc_fw.o \
gt/uc/intel_gsc_proxy.o \
gt/uc/intel_gsc_uc.o \
- gt/uc/intel_gsc_uc_heci_cmd_submit.o\
+ gt/uc/intel_gsc_uc_debugfs.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \
@@ -269,6 +276,7 @@ i915-y += \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
display/intel_plane_initial.o \
+ display/intel_pmdemand.o \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 112d91d81fdc..4c7187f7913e 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -1259,6 +1259,9 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
+ if (!assert_port_valid(dev_priv, port))
+ return false;
+
devdata = intel_bios_encoder_data_lookup(dev_priv, port);
/* FIXME bail? */
@@ -1270,6 +1273,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
if (!dig_port)
return false;
+ dig_port->aux_ch = AUX_CH_NONE;
+
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -1373,6 +1378,9 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
+ if (dig_port->aux_ch == AUX_CH_NONE)
+ goto err_init_connector;
+
if (!intel_dp_init_connector(dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5c187e6e0472..634b14116d9d 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -659,6 +659,20 @@ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
return ret;
}
+static bool is_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+{
+ if (IS_G4X(i915) || IS_VALLEYVIEW(i915))
+ return port == PORT_B || port == PORT_C;
+ else
+ return port == PORT_B || port == PORT_C || port == PORT_D;
+}
+
+static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port)
+{
+ return !drm_WARN(&i915->drm, !is_hdmi_port_valid(i915, port),
+ "Platform does not support HDMI %c\n", port_name(port));
+}
+
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
@@ -667,6 +681,12 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ if (!assert_port_valid(dev_priv, port))
+ return;
+
+ if (!assert_hdmi_port_valid(dev_priv, port))
+ return;
+
devdata = intel_bios_encoder_data_lookup(dev_priv, port);
/* FIXME bail? */
@@ -678,6 +698,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
if (!dig_port)
return;
+ dig_port->aux_ch = AUX_CH_NONE;
+
intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(dig_port);
@@ -753,6 +775,5 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- dig_port->aux_ch = intel_dp_aux_ch(intel_encoder);
intel_hdmi_init_connector(dig_port, intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 616654adbfb8..b10488324457 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -1033,10 +1033,13 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
DSPLINOFF(i9xx_plane));
base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK;
} else {
+ offset = 0;
base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
}
plane_config->base = base;
+ drm_WARN_ON(&dev_priv->drm, offset != 0);
+
val = intel_de_read(dev_priv, PIPESRC(pipe));
fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1;
fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index c133928a0655..ad6488e9c2b2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -444,7 +444,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
- if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
+ (DISPLAY_VER(dev_priv) >= 12)) {
intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
@@ -528,31 +529,16 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
enum port port;
enum phy phy;
- /* Program T-INIT master registers */
- for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
- DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
-
/* Program DPHY clock lanes timings */
- for_each_dsi_port(port, intel_dsi->ports) {
+ for_each_dsi_port(port, intel_dsi->ports)
intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
intel_dsi->dphy_reg);
- /* shadow register inside display core */
- intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
- intel_dsi->dphy_reg);
- }
-
/* Program DPHY data lanes timings */
- for_each_dsi_port(port, intel_dsi->ports) {
+ for_each_dsi_port(port, intel_dsi->ports)
intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
intel_dsi->dphy_data_lane_reg);
- /* shadow register inside display core */
- intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
- }
-
/*
* If DSI link operating at or below an 800 MHz,
* TA_SURE should be override and programmed to
@@ -561,26 +547,55 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
*/
if (DISPLAY_VER(dev_priv) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
- for_each_dsi_port(port, intel_dsi->ports) {
+ for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
TA_SURE_MASK,
TA_SURE_OVERRIDE | TA_SURE(0));
-
- /* shadow register inside display core */
- intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
- TA_SURE_MASK,
- TA_SURE_OVERRIDE | TA_SURE(0));
- }
}
}
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys)
intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
}
}
+static void
+gen11_dsi_setup_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ /* Program T-INIT master registers */
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+ DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
+
+ /* shadow register inside display core */
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
+
+ /* shadow register inside display core */
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+
+ /* shadow register inside display core */
+ if (DISPLAY_VER(dev_priv) == 11) {
+ if (afe_clk(encoder, crtc_state) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+ TA_SURE_MASK,
+ TA_SURE_OVERRIDE | TA_SURE(0));
+ }
+ }
+ }
+}
+
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -1090,11 +1105,15 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* step 4c: configure voltage swing and skew */
gen11_dsi_voltage_swing_program_seq(encoder);
+ /* setup D-PHY timings */
+ gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+
/* enable DDI buffer */
gen11_dsi_enable_ddi_buffer(encoder);
- /* setup D-PHY timings */
- gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+ gen11_dsi_gate_clocks(encoder);
+
+ gen11_dsi_setup_timings(encoder, crtc_state);
/* Since transcoder is configured to take events from GPIO */
gen11_dsi_config_util_pin(encoder, true);
@@ -1104,9 +1123,6 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
gen11_dsi_configure_transcoder(encoder, crtc_state);
-
- /* Step 4l: Gate DDI clocks */
- gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1138,12 +1154,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
"error setting max return pkt size%d\n", tmp);
}
- /* panel power on related mipi dsi vbt sequences */
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
- msleep(intel_dsi->panel_on_delay);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1154,6 +1165,14 @@ static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_dsi_wait_panel_power_cycle(intel_dsi);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ msleep(intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
@@ -1225,9 +1244,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
-
- drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/* Wa_1409054076:icl,jsl,ehl */
icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);
@@ -1238,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
@@ -1271,8 +1290,6 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
/* ensure cmds dispatched to panel */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1373,11 +1390,21 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- struct intel_crtc *crtc = to_intel_crtc(old_conn_state->crtc);
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
+}
+
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+
+ intel_crtc_vblank_off(old_crtc_state);
/* step2d,e: disable transcoder and wait */
gen11_dsi_disable_transcoder(encoder);
@@ -1391,6 +1418,9 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
/* step2h,i,j: deconfig trancoder */
gen11_dsi_deconfigure_trancoder(encoder);
+ intel_dsc_disable(old_crtc_state);
+ skl_scaler_disable(old_crtc_state);
+
/* step3: disable port */
gen11_dsi_disable_port(encoder);
@@ -1398,18 +1428,13 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
/* step4: disable IO power */
gen11_dsi_disable_io_power(encoder);
-}
-static void gen11_dsi_post_disable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_crtc_vblank_off(old_crtc_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- intel_dsc_disable(old_crtc_state);
+ msleep(intel_dsi->panel_off_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
- skl_scaler_disable(old_crtc_state);
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
}
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
@@ -1909,7 +1934,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
fixed_mode->vdisplay);
}
-void icl_dsi_init(struct drm_i915_private *dev_priv)
+void icl_dsi_init(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
@@ -1917,7 +1943,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
struct drm_connector *connector;
enum port port;
- if (!intel_bios_is_dsi_present(dev_priv, &port))
+ port = intel_bios_encoder_port(devdata);
+ if (port == PORT_NONE)
return;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
@@ -1934,6 +1961,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->attached_connector = intel_connector;
connector = &intel_connector->base;
+ encoder->devdata = devdata;
+
/* register DSI encoder with DRM subsystem */
drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
@@ -1957,6 +1986,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->get_power_domains = gen11_dsi_get_power_domains;
encoder->disable_clock = gen11_dsi_gate_clocks;
encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;
+ encoder->shutdown = intel_dsi_shutdown;
/* register DSI connector with DRM subsystem */
drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs,
@@ -1968,7 +1998,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
+
intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev_priv->drm.mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h
index b4861b56b5b2..43fa7d72eeb1 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.h
+++ b/drivers/gpu/drm/i915/display/icl_dsi.h
@@ -7,9 +7,11 @@
#define __ICL_DSI_H__
struct drm_i915_private;
+struct intel_bios_encoder_data;
struct intel_crtc_state;
-void icl_dsi_init(struct drm_i915_private *i915);
+void icl_dsi_init(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata);
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
#endif /* __ICL_DSI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 4125ee07a271..60a492e186ab 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -212,6 +212,7 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
int width, height;
+ unsigned int rel_data_rate;
if (plane->id == PLANE_CURSOR)
return 0;
@@ -241,7 +242,11 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height /= 2;
}
- return width * height * fb->format->cpp[color_plane];
+ rel_data_rate = width * height * fb->format->cpp[color_plane];
+
+ return intel_adjusted_rate(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ rel_data_rate);
}
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
@@ -722,7 +727,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane_state *plane_state;
+ struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
int i;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 34a397adbd6b..858c959f7bab 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2230,122 +2230,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
return 0;
}
-static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
-{
- enum port port;
-
- if (!ddc_pin)
- return PORT_NONE;
-
- for_each_port(port) {
- const struct intel_bios_encoder_data *devdata =
- i915->display.vbt.ports[port];
-
- if (devdata && ddc_pin == devdata->child.ddc_pin)
- return port;
- }
-
- return PORT_NONE;
-}
-
-static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
- enum port port)
-{
- struct drm_i915_private *i915 = devdata->i915;
- struct child_device_config *child;
- u8 mapped_ddc_pin;
- enum port p;
-
- if (!devdata->child.ddc_pin)
- return;
-
- mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin);
- if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) {
- drm_dbg_kms(&i915->drm,
- "Port %c has invalid DDC pin %d, "
- "sticking to defaults\n",
- port_name(port), mapped_ddc_pin);
- devdata->child.ddc_pin = 0;
- return;
- }
-
- p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin);
- if (p == PORT_NONE)
- return;
-
- drm_dbg_kms(&i915->drm,
- "port %c trying to use the same DDC pin (0x%x) as port %c, "
- "disabling port %c DVI/HDMI support\n",
- port_name(port), mapped_ddc_pin,
- port_name(p), port_name(p));
-
- /*
- * If we have multiple ports supposedly sharing the pin, then dvi/hdmi
- * couldn't exist on the shared port. Otherwise they share the same ddc
- * pin and system couldn't communicate with them separately.
- *
- * Give inverse child device order the priority, last one wins. Yes,
- * there are real machines (eg. Asrock B250M-HDV) where VBT has both
- * port A and port E with the same AUX ch and we must pick port E :(
- */
- child = &i915->display.vbt.ports[p]->child;
-
- child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
- child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
-
- child->ddc_pin = 0;
-}
-
-static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
-{
- enum port port;
-
- if (!aux_ch)
- return PORT_NONE;
-
- for_each_port(port) {
- const struct intel_bios_encoder_data *devdata =
- i915->display.vbt.ports[port];
-
- if (devdata && aux_ch == devdata->child.aux_channel)
- return port;
- }
-
- return PORT_NONE;
-}
-
-static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
- enum port port)
-{
- struct drm_i915_private *i915 = devdata->i915;
- struct child_device_config *child;
- enum port p;
-
- p = get_port_by_aux_ch(i915, devdata->child.aux_channel);
- if (p == PORT_NONE)
- return;
-
- drm_dbg_kms(&i915->drm,
- "port %c trying to use the same AUX CH (0x%x) as port %c, "
- "disabling port %c DP support\n",
- port_name(port), devdata->child.aux_channel,
- port_name(p), port_name(p));
-
- /*
- * If we have multiple ports supposedly sharing the aux channel, then DP
- * couldn't exist on the shared port. Otherwise they share the same aux
- * channel and system couldn't communicate with them separately.
- *
- * Give inverse child device order the priority, last one wins. Yes,
- * there are real machines (eg. Asrock B250M-HDV) where VBT has both
- * port A and port E with the same AUX ch and we must pick port E :(
- */
- child = &i915->display.vbt.ports[p]->child;
-
- child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- child->aux_channel = 0;
-}
-
static u8 dvo_port_type(u8 dvo_port)
{
switch (dvo_port) {
@@ -2490,6 +2374,19 @@ dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
}
}
+enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ const struct child_device_config *child = &devdata->child;
+ enum port port;
+
+ port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
+ port = dsi_dvo_port_to_port(i915, child->dvo_port);
+
+ return port;
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -2600,7 +2497,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
-static bool
+bool
intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
{
return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
@@ -2615,7 +2512,8 @@ intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->display.vbt.version < 158)
+ if (!devdata || devdata->i915->display.vbt.version < 158 ||
+ DISPLAY_VER(devdata->i915) >= 14)
return -1;
return devdata->child.hdmi_level_shifter_value;
@@ -2658,13 +2556,17 @@ static bool is_port_valid(struct drm_i915_private *i915, enum port port)
return true;
}
-static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
- enum port port)
+static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
{
struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
+ enum port port;
+
+ port = intel_bios_encoder_port(devdata);
+ if (port == PORT_NONE)
+ return;
is_dvi = intel_bios_encoder_supports_dvi(devdata);
is_dp = intel_bios_encoder_supports_dp(devdata);
@@ -2728,12 +2630,9 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
{
struct drm_i915_private *i915 = devdata->i915;
- const struct child_device_config *child = &devdata->child;
enum port port;
- port = dvo_port_to_port(i915, child->dvo_port);
- if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
- port = dsi_dvo_port_to_port(i915, child->dvo_port);
+ port = intel_bios_encoder_port(devdata);
if (port == PORT_NONE)
return;
@@ -2744,22 +2643,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
return;
}
- if (i915->display.vbt.ports[port]) {
- drm_dbg_kms(&i915->drm,
- "More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
- return;
- }
-
sanitize_device_type(devdata, port);
-
- if (intel_bios_encoder_supports_dvi(devdata))
- sanitize_ddc_pin(devdata, port);
-
- if (intel_bios_encoder_supports_dp(devdata))
- sanitize_aux_ch(devdata, port);
-
- i915->display.vbt.ports[port] = devdata;
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -2770,7 +2654,6 @@ static bool has_ddi_port_info(struct drm_i915_private *i915)
static void parse_ddi_ports(struct drm_i915_private *i915)
{
struct intel_bios_encoder_data *devdata;
- enum port port;
if (!has_ddi_port_info(i915))
return;
@@ -2778,10 +2661,8 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
parse_ddi_port(devdata);
- for_each_port(port) {
- if (i915->display.vbt.ports[port])
- print_ddi_port(i915->display.vbt.ports[port], port);
- }
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ print_ddi_port(devdata);
}
static void
@@ -3706,5 +3587,22 @@ bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->display.vbt.ports[port];
+ struct intel_bios_encoder_data *devdata;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ if (intel_bios_encoder_port(devdata) == port)
+ return devdata;
+ }
+
+ return NULL;
+}
+
+void intel_bios_for_each_encoder(struct drm_i915_private *i915,
+ void (*func)(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata))
+{
+ struct intel_bios_encoder_data *devdata;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ func(i915, devdata);
}
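The intel_bios.c changes above move per-port lookups onto the VBT display-device list and add a for-each-encoder helper that invokes a callback once per entry. A simplified sketch of that iteration shape, with placeholder types rather than the i915 structures:

#include <linux/list.h>

struct demo_encoder_data {
	struct list_head node;
	int port;
};

struct demo_display {
	struct list_head display_devices;
};

static void demo_for_each_encoder(struct demo_display *disp,
				  void (*func)(struct demo_display *disp,
					       const struct demo_encoder_data *devdata))
{
	struct demo_encoder_data *devdata;

	list_for_each_entry(devdata, &disp->display_devices, node)
		func(disp, devdata);
}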
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 45fae97d9719..9680e3e92bb5 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -263,10 +263,12 @@ bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdat
bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
+enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata);
enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
@@ -276,4 +278,8 @@ int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata);
+void intel_bios_for_each_encoder(struct drm_i915_private *i915,
+ void (*func)(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata));
+
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 597d5816ad1b..bef96db62c80 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -182,7 +182,7 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
val2 = intel_uncore_read(&dev_priv->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
- sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
+ sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
@@ -379,7 +379,7 @@ static const struct intel_sa_info mtl_sa_info = {
.deburst = 32,
.deprogbwlimit = 38, /* GB/s */
.displayrtids = 256,
- .derating = 20,
+ .derating = 10,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -534,10 +534,14 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
+ bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
+ num_channels *
+ qi.channel_width, 8);
drm_dbg_kms(&dev_priv->drm,
- "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
- i, j, bi->num_planes, bi->deratedbw[j]);
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
+ i, j, bi->num_planes, bi->deratedbw[j],
+ bi->peakbw[j]);
}
for (j = 0; j < qi.num_psf_points; j++) {
@@ -589,8 +593,8 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
- int num_planes, int qgv_point)
+static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
{
int i;
@@ -611,14 +615,14 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
return UINT_MAX;
if (num_planes >= bi->num_planes)
- return bi->deratedbw[qgv_point];
+ return i;
}
- return 0;
+ return UINT_MAX;
}
-static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
- int num_planes, int qgv_point)
+static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
{
int i;
@@ -639,10 +643,10 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
return UINT_MAX;
if (num_planes <= bi->num_planes)
- return bi->deratedbw[qgv_point];
+ return i;
}
- return dev_priv->display.bw.max[0].deratedbw[qgv_point];
+ return 0;
}
static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
@@ -799,6 +803,210 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
return to_intel_bw_state(bw_state);
}
+static int mtl_find_qgv_points(struct drm_i915_private *i915,
+ unsigned int data_rate,
+ unsigned int num_active_planes,
+ struct intel_bw_state *new_bw_state)
+{
+ unsigned int best_rate = UINT_MAX;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int qgv_peak_bw = 0;
+ int i;
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+
+ /*
+ * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
+ * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
+ * not enabled. PM Demand code will clamp the value for the register
+ */
+ if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ new_bw_state->qgv_point_peakbw = U16_MAX;
+ drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ return 0;
+ }
+
+ /*
+ * Find the best QGV point by comparing the data_rate with max data rate
+ * offered per plane group
+ */
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int bw_index =
+ tgl_max_bw_index(i915, num_active_planes, i);
+ unsigned int max_data_rate;
+
+ if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ continue;
+
+ max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+
+ if (max_data_rate < data_rate)
+ continue;
+
+ if (max_data_rate - data_rate < best_rate) {
+ best_rate = max_data_rate - data_rate;
+ qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ }
+
+ drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ i, max_data_rate, data_rate, qgv_peak_bw);
+ }
+
+ drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ qgv_peak_bw, data_rate);
+
+ /*
+ * The display configuration cannot be supported if no QGV point
+ * satisfying the required data rate is found
+ */
+ if (qgv_peak_bw == 0) {
+ drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ /* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
+ new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
+
+ return 0;
+}
+
+static int icl_find_qgv_points(struct drm_i915_private *i915,
+ unsigned int data_rate,
+ unsigned int num_active_planes,
+ const struct intel_bw_state *old_bw_state,
+ struct intel_bw_state *new_bw_state)
+{
+ unsigned int max_bw_point = 0;
+ unsigned int max_bw = 0;
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ u16 psf_points = 0;
+ u16 qgv_points = 0;
+ int i;
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int idx;
+ unsigned int max_data_rate;
+
+ if (DISPLAY_VER(i915) > 11)
+ idx = tgl_max_bw_index(i915, num_active_planes, i);
+ else
+ idx = icl_max_bw_index(i915, num_active_planes, i);
+
+ if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ continue;
+
+ max_data_rate = i915->display.bw.max[idx].deratedbw[i];
+
+ /*
+ * We need to know which qgv point gives us
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed SAGV block time
+ * with watermarks. By that moment we already
+ * have those, as it is calculated earlier in
+ * intel_atomic_check,
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = i;
+ max_bw = max_data_rate;
+ }
+ if (max_data_rate >= data_rate)
+ qgv_points |= BIT(i);
+
+ drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ for (i = 0; i < num_psf_gv_points; i++) {
+ unsigned int max_data_rate = adl_psf_bw(i915, i);
+
+ if (max_data_rate >= data_rate)
+ psf_points |= BIT(i);
+
+ drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ " required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ /*
+ * BSpec states that we always should have at least one allowed point
+ * left, so if we couldn't - simply reject the configuration for obvious
+ * reasons.
+ */
+ if (qgv_points == 0) {
+ drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ if (num_psf_gv_points > 0 && psf_points == 0) {
+ drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ /*
+ * Leave only single point with highest bandwidth, if
+ * we can't enable SAGV due to the increased memory latency it may
+ * cause.
+ */
+ if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ qgv_points = BIT(max_bw_point);
+ drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
+ max_bw_point);
+ }
+
+ /*
+ * We store the ones which need to be masked as that is what PCode
+ * actually accepts as a parameter.
+ */
+ new_bw_state->qgv_points_mask =
+ ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) &
+ icl_qgv_points_mask(i915);
+
+ /*
+ * If the actual mask had changed we need to make sure that
+ * the commits are serialized(in case this is a nomodeset, nonblocking)
+ */
+ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
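The qgv_points_mask computation above stores the *restricted* points, since that is the polarity PCode consumes. A standalone sketch of the same idea, with plain bit operations standing in for the driver's ICL_PCODE_REQ_QGV_PT()/ADLS_PCODE_REQ_PSF_PT() field encoding (omitted here for brevity):

    #include <stdint.h>

    /*
     * allowed:   bitmap of QGV points that can sustain the data rate
     * supported: bitmap of QGV points that exist on this platform
     * returns:   bitmap of points to restrict (what PCode is given)
     */
    static uint16_t qgv_restrict_mask(uint16_t allowed, uint16_t supported)
    {
            return (uint16_t)(~allowed & supported);
    }

    /*
     * With three supported points (0x7) and only point 2 fast enough
     * (allowed = 0x4), PCode is asked to restrict points 0 and 1 (0x3).
     */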
+
+static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+ const struct intel_bw_state *old_bw_state,
+ struct intel_bw_state *new_bw_state)
+{
+ unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int num_active_planes =
+ intel_bw_num_active_planes(i915, new_bw_state);
+
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
+
+ if (DISPLAY_VER(i915) >= 14)
+ return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ new_bw_state);
+ else
+ return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ old_bw_state, new_bw_state);
+}
+
static bool intel_bw_state_changed(struct drm_i915_private *i915,
const struct intel_bw_state *old_bw_state,
const struct intel_bw_state *new_bw_state)
@@ -1045,20 +1253,14 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state;
- struct intel_bw_state *new_bw_state;
- unsigned int data_rate;
- unsigned int num_active_planes;
- int i, ret;
- u16 qgv_points = 0, psf_points = 0;
- unsigned int max_bw_point = 0, max_bw = 0;
- unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
- unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
bool changed = false;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_bw_state *new_bw_state;
+ const struct intel_bw_state *old_bw_state;
+ int ret;
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(i915) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1069,8 +1271,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- intel_can_enable_sagv(dev_priv, old_bw_state) !=
- intel_can_enable_sagv(dev_priv, new_bw_state))
+ intel_can_enable_sagv(i915, old_bw_state) !=
+ intel_can_enable_sagv(i915, new_bw_state))
changed = true;
/*
@@ -1080,101 +1282,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
if (ret)
return ret;
- data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
- data_rate = DIV_ROUND_UP(data_rate, 1000);
-
- num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
-
- for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate;
-
- if (DISPLAY_VER(dev_priv) > 11)
- max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
- else
- max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
- /*
- * We need to know which qgv point gives us
- * maximum bandwidth in order to disable SAGV
- * if we find that we exceed SAGV block time
- * with watermarks. By that moment we already
- * have those, as it is calculated earlier in
- * intel_atomic_check,
- */
- if (max_data_rate > max_bw) {
- max_bw_point = i;
- max_bw = max_data_rate;
- }
- if (max_data_rate >= data_rate)
- qgv_points |= BIT(i);
-
- drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
- i, max_data_rate, data_rate);
- }
-
- for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(dev_priv, i);
-
- if (max_data_rate >= data_rate)
- psf_points |= BIT(i);
-
- drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
- " required %d\n",
- i, max_data_rate, data_rate);
- }
-
- /*
- * BSpec states that we always should have at least one allowed point
- * left, so if we couldn't - simply reject the configuration for obvious
- * reasons.
- */
- if (qgv_points == 0) {
- drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
- " bandwidth %d for display configuration(%d active planes).\n",
- data_rate, num_active_planes);
- return -EINVAL;
- }
-
- if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
- " bandwidth %d for display configuration(%d active planes).\n",
- data_rate, num_active_planes);
- return -EINVAL;
- }
-
- /*
- * Leave only single point with highest bandwidth, if
- * we can't enable SAGV due to the increased memory latency it may
- * cause.
- */
- if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
- qgv_points = BIT(max_bw_point);
- drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
- max_bw_point);
- }
-
- /*
- * We store the ones which need to be masked as that is what PCode
- * actually accepts as a parameter.
- */
- new_bw_state->qgv_points_mask =
- ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) &
- icl_qgv_points_mask(dev_priv);
-
- /*
- * If the actual mask had changed we need to make sure that
- * the commits are serialized(in case this is a nomodeset, nonblocking)
- */
- if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index f20292143745..59cb4fc5db76 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -35,6 +35,12 @@ struct intel_bw_state {
u8 active_pipes;
/*
+ * From MTL onwards, to lock a QGV point, punit expects the peak BW of
+ * the selected QGV point as the parameter in multiples of 100MB/s
+ */
+ u16 qgv_point_peakbw;
+
+ /*
* Current QGV points mask, which restricts
* some particular SAGV states, not to confuse
* with pipe_sagv_mask.
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 1a5268e3d0a3..2fb030b1ff1d 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -37,6 +37,7 @@
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_psr.h"
+#include "intel_vdsc.h"
#include "vlv_sideband.h"
/**
@@ -469,7 +470,7 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
cdclk_config->cdclk = 450000;
- else if (IS_HSW_ULT(dev_priv))
+ else if (IS_HASWELL_ULT(dev_priv))
cdclk_config->cdclk = 337500;
else
cdclk_config->cdclk = 540000;
@@ -2607,9 +2608,16 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* When we decide to use only one VDSC engine, since
* each VDSC operates with 1 ppc throughput, pixel clock
* cannot be higher than the VDSC clock (cdclk)
+ * If there are 2 VDSC engines, then the pixel clock can't be higher
+ * than VDSC clock (cdclk) * 2, and so on.
*/
- if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
- min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+ if (crtc_state->dsc.compression_enable) {
+ int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+
+ min_cdclk = max_t(int, min_cdclk,
+ DIV_ROUND_UP(crtc_state->pixel_rate,
+ num_vdsc_instances));
+ }
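To make the new clamp concrete: with a hypothetical pixel_rate of 1,200,000 (in the same units as crtc_state->pixel_rate) and two VDSC engines, min_cdclk only needs to reach DIV_ROUND_UP(1200000, 2) = 600,000, whereas the old single-engine-only check would have required the full 1,200,000.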
/*
* HACK. Currently for TGL/DG2 platforms we calculate
@@ -2959,7 +2967,7 @@ int intel_cdclk_atomic_check(struct intel_atomic_state *state,
{
const struct intel_cdclk_state *old_cdclk_state;
const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state *plane_state;
+ struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
int ret;
int i;
@@ -3147,7 +3155,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
if (dev_priv->display.cdclk.hw.ref == 24000)
dev_priv->display.cdclk.max_cdclk_freq = 552000;
else
@@ -3192,9 +3200,9 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->display.cdclk.max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev_priv))
+ else if (IS_BROADWELL_ULX(dev_priv))
dev_priv->display.cdclk.max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev_priv))
+ else if (IS_BROADWELL_ULT(dev_priv))
dev_priv->display.cdclk.max_cdclk_freq = 540000;
else
dev_priv->display.cdclk.max_cdclk_freq = 675000;
@@ -3559,10 +3567,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
/* Wa_22011320316:adl-p[a0] */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
- } else if (IS_ADLP_RPLU(dev_priv)) {
+ } else if (IS_RAPTORLAKE_U(dev_priv)) {
dev_priv->display.cdclk.table = rplu_cdclk_table;
dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
} else {
@@ -3575,7 +3583,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
} else if (DISPLAY_VER(dev_priv) >= 12) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
dev_priv->display.cdclk.table = icl_cdclk_table;
- } else if (IS_JSL_EHL(dev_priv)) {
+ } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 8966e6560516..454607b4a02a 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -1453,6 +1453,16 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
return 35;
}
+/*
+ * change_lut_val_precision: helper function to upscale or downscale lut values.
+ * Parameters 'to' and 'from' need to be less than 32. This should be sufficient
+ * as currently there are no lut values exceeding 32 bit.
+ */
+static u32 change_lut_val_precision(u32 lut_val, int to, int from)
+{
+ return mul_u32_u32(lut_val, (1 << to)) / (1 << from);
+}
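A worked example of the scaling above, written with the shift-based equivalent for power-of-two factors (an illustration only, not the driver helper):

    #include <stdint.h>

    /* Same arithmetic as change_lut_val_precision(): val * 2^to / 2^from. */
    static uint32_t scale_lut_val(uint32_t val, int to, int from)
    {
            return (uint32_t)(((uint64_t)val << to) >> from);
    }

    /*
     * scale_lut_val(0xFFFF, 24, 16) == 0xFFFF00: a full-range 16-bit
     * userspace entry widens to 24 bits for MTL hardware, and
     * scale_lut_val(0xFFFF00, 16, 24) == 0xFFFF reverses it on readout.
     */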
+
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
@@ -1487,8 +1497,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
+ u32 lut_val;
+
+ if (DISPLAY_VER(i915) >= 14)
+ lut_val = change_lut_val_precision(lut[i].green, 24, 16);
+ else
+ lut_val = lut[i].green;
+
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- lut[i].green);
+ lut_val);
}
/* Clamp values > 1.0. */
@@ -3439,6 +3456,14 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
+ /*
+ * For MTL and beyond, convert back the 24 bit lut values
+ * read from HW to 16 bit values to maintain parity with
+ * userspace values
+ */
+ if (DISPLAY_VER(dev_priv) >= 14)
+ val = change_lut_val_precision(val, 16, 24);
+
lut[i].red = val;
lut[i].green = val;
lut[i].blue = val;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 922a6d87b553..e2a220cf2e57 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -141,7 +141,7 @@ static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
if (IS_ALDERLAKE_S(i915))
return phy == PHY_A;
- else if (IS_JSL_EHL(i915) ||
+ else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) ||
IS_ROCKETLAKE(i915) ||
IS_DG1(i915))
return phy < PHY_C;
@@ -242,7 +242,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
if (ehl_vbt_ddi_d_present(dev_priv))
expected_val = ICL_PHY_MISC_MUX_DDID;
@@ -333,7 +333,8 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* "internal" child devices.
*/
val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
- if (IS_JSL_EHL(dev_priv) && phy == PHY_A) {
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ phy == PHY_A) {
val &= ~ICL_PHY_MISC_MUX_DDID;
if (ehl_vbt_ddi_d_present(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 00ea71b03ec7..ff3bcadebe59 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -280,14 +280,14 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
void
intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
- if (!drm_mode_create_hdmi_colorspace_property(connector))
+ if (!drm_mode_create_hdmi_colorspace_property(connector, 0))
drm_connector_attach_colorspace_property(connector);
}
void
intel_attach_dp_colorspace_property(struct drm_connector *connector)
{
- if (!drm_mode_create_dp_colorspace_property(connector))
+ if (!drm_mode_create_dp_colorspace_property(connector, 0))
drm_connector_attach_colorspace_property(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index ab7cd5e60a0a..809074758687 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -1064,6 +1064,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
}
if (HAS_DDI(dev_priv)) {
+ assert_port_valid(dev_priv, PORT_E);
+
crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 0600fdcd06ef..1b00ef2c6185 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -116,6 +116,7 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
+ intel_cx0_bus_reset(i915, port, lane);
return -ETIMEDOUT;
}
@@ -158,10 +159,8 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
XELPDP_PORT_M2P_ADDRESS(addr));
ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
- if (ack < 0) {
- intel_cx0_bus_reset(i915, port, lane);
+ if (ack < 0)
return ack;
- }
intel_clear_response_ready_flag(i915, port, lane);
@@ -202,6 +201,7 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
int lane, u16 addr, u8 data, bool committed)
{
enum phy phy = intel_port_to_phy(i915, port);
+ int ack;
u32 val;
if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
@@ -230,10 +230,9 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
}
if (committed) {
- if (intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val) < 0) {
- intel_cx0_bus_reset(i915, port, lane);
- return -EINVAL;
- }
+ ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ if (ack < 0)
+ return ack;
} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
drm_dbg_kms(&i915->drm,
@@ -2435,7 +2434,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLB, val);
+ XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_SSC_ENABLE_PLLB, val);
}
static u32 intel_cx0_get_powerdown_update(u8 lane_mask)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index f99809af257d..4c4db5cdcbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -43,8 +43,5 @@ int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
-void intel_cx0_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- u32 level);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 70d44edd8c6e..84bbf854337a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "icl_dsi.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
#include "intel_backlight.h"
@@ -2225,12 +2226,10 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp;
if (!crtc_state->fec_enable)
return;
- intel_dp = enc_to_intel_dp(encoder);
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
0, DP_TP_CTL_FEC_ENABLE);
}
@@ -2239,12 +2238,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp;
if (!crtc_state->fec_enable)
return;
- intel_dp = enc_to_intel_dp(encoder);
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
@@ -3586,7 +3583,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
{
if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
- else if (IS_JSL_EHL(dev_priv) && crtc_state->port_clock > 594000)
+ else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 3;
else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
@@ -4657,13 +4655,95 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
+static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+{
+ /* straps not used on skl+ */
+ if (DISPLAY_VER(i915) >= 9)
+ return true;
+
+ switch (port) {
+ case PORT_A:
+ return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ case PORT_B:
+ return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ case PORT_C:
+ return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ case PORT_D:
+ return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ case PORT_E:
+ return true; /* no strap for DDI-E */
+ default:
+ MISSING_CASE(port);
+ return false;
+ }
+}
+
+static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return init_dp || intel_phy_is_tc(i915, phy);
+}
+
+static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+{
+ return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
+ !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ "Platform does not support DSI\n");
+}
+
+static bool port_in_use(struct drm_i915_private *i915, enum port port)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ /* FIXME what about second port for dual link DSI? */
+ if (encoder->port == port)
+ return true;
+ }
+
+ return false;
+}
+
+void intel_ddi_init(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata)
{
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
- const struct intel_bios_encoder_data *devdata;
bool init_hdmi, init_dp;
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum port port;
+ enum phy phy;
+
+ port = intel_bios_encoder_port(devdata);
+ if (port == PORT_NONE)
+ return;
+
+ if (!port_strap_detected(dev_priv, port)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c strap not detected\n", port_name(port));
+ return;
+ }
+
+ if (!assert_port_valid(dev_priv, port))
+ return;
+
+ if (port_in_use(dev_priv, port)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c already claimed\n", port_name(port));
+ return;
+ }
+
+ if (intel_bios_encoder_supports_dsi(devdata)) {
+ /* BXT/GLK handled elsewhere, for now at least */
+ if (!assert_has_icl_dsi(dev_priv))
+ return;
+
+ icl_dsi_init(dev_priv, devdata);
+ return;
+ }
+
+ phy = intel_port_to_phy(dev_priv, port);
/*
* On platforms with HTI (aka HDPORT), if it's enabled at boot it may
@@ -4677,14 +4757,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
}
- devdata = intel_bios_encoder_data_lookup(dev_priv, port);
- if (!devdata) {
- drm_dbg_kms(&dev_priv->drm,
- "VBT says port %c is not present\n",
- port_name(port));
- return;
- }
-
init_hdmi = intel_bios_encoder_supports_dvi(devdata) ||
intel_bios_encoder_supports_hdmi(devdata);
init_dp = intel_bios_encoder_supports_dp(devdata);
@@ -4719,6 +4791,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (!dig_port)
return;
+ dig_port->aux_ch = AUX_CH_NONE;
+
encoder = &dig_port->base;
encoder->devdata = devdata;
@@ -4805,7 +4879,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JSL_EHL(dev_priv)) {
+ } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
if (intel_ddi_is_tc(dev_priv, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
@@ -4876,7 +4950,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) >= 12)
encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) == 11)
encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
@@ -4899,7 +4973,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
- dig_port->aux_ch = intel_dp_aux_ch(encoder);
+
+ if (need_aux_ch(encoder, init_dp)) {
+ dig_port->aux_ch = intel_dp_aux_ch(encoder);
+ if (dig_port->aux_ch == AUX_CH_NONE)
+ goto err;
+ }
if (intel_phy_is_tc(dev_priv, phy)) {
bool is_legacy =
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 2bc034042a93..4999c0ee229b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -11,6 +11,7 @@
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
+struct intel_bios_encoder_data;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
@@ -50,7 +51,8 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port);
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+void intel_ddi_init(struct drm_i915_private *dev_priv,
+ const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index b7d20485bde5..de809e2d9cac 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1049,12 +1049,26 @@ static const union intel_ddi_buf_trans_entry _mtl_c10_trans_dp14[] = {
{ .snps = { 62, 0, 0 } }, /* preset 9 */
};
-static const struct intel_ddi_buf_trans mtl_cx0_trans = {
+static const struct intel_ddi_buf_trans mtl_c10_trans_dp14 = {
.entries = _mtl_c10_trans_dp14,
.num_entries = ARRAY_SIZE(_mtl_c10_trans_dp14),
.hdmi_default_entry = ARRAY_SIZE(_mtl_c10_trans_dp14) - 1,
};
+/* DP1.4 */
+static const union intel_ddi_buf_trans_entry _mtl_c20_trans_dp14[] = {
+ { .snps = { 20, 0, 0 } }, /* preset 0 */
+ { .snps = { 24, 0, 4 } }, /* preset 1 */
+ { .snps = { 30, 0, 9 } }, /* preset 2 */
+ { .snps = { 34, 0, 14 } }, /* preset 3 */
+ { .snps = { 29, 0, 0 } }, /* preset 4 */
+ { .snps = { 34, 0, 5 } }, /* preset 5 */
+ { .snps = { 38, 0, 10 } }, /* preset 6 */
+ { .snps = { 36, 0, 0 } }, /* preset 7 */
+ { .snps = { 40, 0, 6 } }, /* preset 8 */
+ { .snps = { 48, 0, 0 } }, /* preset 9 */
+};
+
/* DP2.0 */
static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = {
{ .snps = { 48, 0, 0 } }, /* preset 0 */
@@ -1072,7 +1086,7 @@ static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = {
{ .snps = { 37, 4, 7 } }, /* preset 12 */
{ .snps = { 33, 4, 11 } }, /* preset 13 */
{ .snps = { 40, 8, 0 } }, /* preset 14 */
- { .snps = { 28, 2, 2 } }, /* preset 15 */
+ { .snps = { 30, 2, 2 } }, /* preset 15 */
};
/* HDMI2.0 */
@@ -1090,6 +1104,12 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_hdmi = {
.hdmi_default_entry = 0,
};
+static const struct intel_ddi_buf_trans mtl_c20_trans_dp14 = {
+ .entries = _mtl_c20_trans_dp14,
+ .num_entries = ARRAY_SIZE(_mtl_c20_trans_dp14),
+ .hdmi_default_entry = ARRAY_SIZE(_mtl_c20_trans_dp14) - 1,
+};
+
static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
.entries = _mtl_c20_trans_uhbr,
.num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
@@ -1390,7 +1410,7 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
- if (IS_TGL_UY(dev_priv)) {
+ if (IS_TIGERLAKE_UY(dev_priv)) {
return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
n_entries);
} else {
@@ -1678,8 +1698,10 @@ mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
+ else if (!intel_is_c10phy(i915, phy))
+ return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
- return intel_get_buf_trans(&mtl_cx0_trans, n_entries);
+ return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
}
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
@@ -1718,15 +1740,15 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
encoder->get_buf_trans = icl_get_mg_buf_trans;
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
encoder->get_buf_trans = bxt_get_buf_trans;
- } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) {
+ } else if (IS_COMETLAKE_ULX(i915) || IS_COFFEELAKE_ULX(i915) || IS_KABYLAKE_ULX(i915)) {
encoder->get_buf_trans = kbl_y_get_buf_trans;
- } else if (IS_CML_ULT(i915) || IS_CFL_ULT(i915) || IS_KBL_ULT(i915)) {
+ } else if (IS_COMETLAKE_ULT(i915) || IS_COFFEELAKE_ULT(i915) || IS_KABYLAKE_ULT(i915)) {
encoder->get_buf_trans = kbl_u_get_buf_trans;
} else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
encoder->get_buf_trans = kbl_get_buf_trans;
- } else if (IS_SKL_ULX(i915)) {
+ } else if (IS_SKYLAKE_ULX(i915)) {
encoder->get_buf_trans = skl_y_get_buf_trans;
- } else if (IS_SKL_ULT(i915)) {
+ } else if (IS_SKYLAKE_ULT(i915)) {
encoder->get_buf_trans = skl_u_get_buf_trans;
} else if (IS_SKYLAKE(i915)) {
encoder->get_buf_trans = skl_get_buf_trans;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index f51a55f4e9d0..763ab569d8f3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -53,7 +53,6 @@
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_wm.h"
-#include "icl_dsi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -99,6 +98,7 @@
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
+#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sdvo.h"
@@ -971,7 +971,7 @@ static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
u8 update_planes = crtc_state->update_planes;
- const struct intel_plane_state *plane_state;
+ const struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
int i;
@@ -988,7 +988,7 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
u8 update_planes = crtc_state->update_planes;
- const struct intel_plane_state *plane_state;
+ const struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
int i;
@@ -1749,7 +1749,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
return phy <= PHY_C;
else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
return phy <= PHY_B;
@@ -1801,7 +1801,8 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
return PHY_B + port - PORT_TC1;
else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
return PHY_C + port - PORT_TC1;
- else if (IS_JSL_EHL(i915) && port == PORT_D)
+ else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ port == PORT_D)
return PHY_A;
return PHY_A + port - PORT_A;
@@ -3152,6 +3153,10 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(dev_priv) >= 12)
val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
+ /* allow PSR with sprite enabled */
+ if (IS_BROADWELL(dev_priv))
+ val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
+
intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
}
@@ -4563,7 +4568,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
saved_state->uapi = slave_crtc_state->uapi;
saved_state->scaler_state = slave_crtc_state->scaler_state;
saved_state->shared_dpll = slave_crtc_state->shared_dpll;
- saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
saved_state->crc_enabled = slave_crtc_state->crc_enabled;
intel_crtc_free_hw_state(slave_crtc_state);
@@ -5617,7 +5621,7 @@ static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_crtc *other)
{
- const struct intel_plane_state *plane_state;
+ const struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
u8 plane_ids = 0;
int i;
@@ -5660,7 +5664,7 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_plane_state *plane_state;
+ struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
struct intel_crtc *crtc;
int i, ret;
@@ -5715,7 +5719,7 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
{
- struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state __maybe_unused *crtc_state;
struct intel_crtc *crtc;
int i;
@@ -6012,8 +6016,9 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
*/
if (DISPLAY_VER(i915) < 12) {
drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Modifier does not support async flips\n",
- plane->base.base.id, plane->base.name);
+ "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
+ plane->base.base.id, plane->base.name,
+ new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
return -EINVAL;
}
break;
@@ -6025,8 +6030,9 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
break;
default:
drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] Modifier does not support async flips\n",
- plane->base.base.id, plane->base.name);
+ "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+ plane->base.base.id, plane->base.name,
+ new_plane_state->hw.fb->modifier);
return -EINVAL;
}
@@ -6352,6 +6358,10 @@ int intel_atomic_check(struct drm_device *dev,
return ret;
}
+ ret = intel_pmdemand_atomic_check(state);
+ if (ret)
+ goto fail;
+
ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
@@ -6997,6 +7007,14 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
crtc->config = new_crtc_state;
+ /*
+ * In XE_LPD+, Pmdemand combines many parameters such as voltage index,
+ * plls, cdclk frequency, QGV point selection parameter etc. The voltage
+ * index and cdclk/ddiclk frequencies are supposed to be configured before
+ * the cdclk config is set.
+ */
+ intel_pmdemand_pre_plane_update(state);
+
if (state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
@@ -7116,6 +7134,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -7128,7 +7147,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
+ /*
+ * Delay re-enabling DC states by 17 ms to avoid the off->on->off
+ * toggling overhead at and above 60 FPS.
+ */
+ intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
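The 17 ms figure follows from the frame period: at 60 FPS a frame lasts 1000/60 ≈ 16.7 ms, so holding the DC_OFF power reference slightly longer than one frame keeps it from bouncing off and back on between back-to-back flips.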
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
@@ -7164,11 +7187,12 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
break;
case FENCE_FREE:
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_atomic_helper *helper =
- &to_i915(state->base.dev)->display.atomic_helper;
+ &i915->display.atomic_helper;
if (llist_add(&state->freed, &helper->free_list))
- schedule_work(&helper->free_work);
+ queue_work(i915->unordered_wq, &helper->free_work);
break;
}
}
@@ -7354,7 +7378,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) >= 9)
return false;
- if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
@@ -7371,6 +7395,12 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return true;
}
+bool assert_port_valid(struct drm_i915_private *i915, enum port port)
+{
+ return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
+ "Platform does not support port %c\n", port_name(port));
+}
+
void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
@@ -7381,93 +7411,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_METEORLAKE(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_TC1);
- intel_ddi_init(dev_priv, PORT_TC2);
- intel_ddi_init(dev_priv, PORT_TC3);
- intel_ddi_init(dev_priv, PORT_TC4);
- } else if (IS_DG2(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_C);
- intel_ddi_init(dev_priv, PORT_D_XELPD);
- intel_ddi_init(dev_priv, PORT_TC1);
- } else if (IS_ALDERLAKE_P(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_TC1);
- intel_ddi_init(dev_priv, PORT_TC2);
- intel_ddi_init(dev_priv, PORT_TC3);
- intel_ddi_init(dev_priv, PORT_TC4);
- icl_dsi_init(dev_priv);
- } else if (IS_ALDERLAKE_S(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_TC1);
- intel_ddi_init(dev_priv, PORT_TC2);
- intel_ddi_init(dev_priv, PORT_TC3);
- intel_ddi_init(dev_priv, PORT_TC4);
- } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_TC1);
- intel_ddi_init(dev_priv, PORT_TC2);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_TC1);
- intel_ddi_init(dev_priv, PORT_TC2);
- intel_ddi_init(dev_priv, PORT_TC3);
- intel_ddi_init(dev_priv, PORT_TC4);
- intel_ddi_init(dev_priv, PORT_TC5);
- intel_ddi_init(dev_priv, PORT_TC6);
- icl_dsi_init(dev_priv);
- } else if (IS_JSL_EHL(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_C);
- intel_ddi_init(dev_priv, PORT_D);
- icl_dsi_init(dev_priv);
- } else if (DISPLAY_VER(dev_priv) == 11) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_C);
- intel_ddi_init(dev_priv, PORT_D);
- intel_ddi_init(dev_priv, PORT_E);
- intel_ddi_init(dev_priv, PORT_F);
- icl_dsi_init(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_C);
- vlv_dsi_init(dev_priv);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
- intel_ddi_init(dev_priv, PORT_A);
- intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_C);
- intel_ddi_init(dev_priv, PORT_D);
- intel_ddi_init(dev_priv, PORT_E);
- } else if (HAS_DDI(dev_priv)) {
- u32 found;
-
+ if (HAS_DDI(dev_priv)) {
if (intel_ddi_crt_present(dev_priv))
intel_crt_init(dev_priv);
- /* Haswell uses DDI functions to detect digital outputs. */
- found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
- if (found)
- intel_ddi_init(dev_priv, PORT_A);
-
- found = intel_de_read(dev_priv, SFUSE_STRAP);
- if (found & SFUSE_STRAP_DDIB_DETECTED)
- intel_ddi_init(dev_priv, PORT_B);
- if (found & SFUSE_STRAP_DDIC_DETECTED)
- intel_ddi_init(dev_priv, PORT_C);
- if (found & SFUSE_STRAP_DDID_DETECTED)
- intel_ddi_init(dev_priv, PORT_D);
- if (found & SFUSE_STRAP_DDIF_DETECTED)
- intel_ddi_init(dev_priv, PORT_F);
+ intel_bios_for_each_encoder(dev_priv, intel_ddi_init);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ vlv_dsi_init(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index c744c021af23..49ac8473b988 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -113,7 +113,7 @@ enum i9xx_plane_id {
#define for_each_dbuf_slice(__dev_priv, __slice) \
for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display->dbuf.slice_mask & BIT(__slice))
+ for_each_if(DISPLAY_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
for_each_dbuf_slice((__dev_priv), (__slice)) \
@@ -539,6 +539,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
+bool assert_port_valid(struct drm_i915_private *i915, enum port port);
+
/*
* Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity
* checks to check for unexpected conditions which may not necessarily be a user
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 2209811eb29e..53e5c33e08c3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -17,6 +17,7 @@
#include <drm/drm_modeset_lock.h>
#include "intel_cdclk.h"
+#include "intel_display_device.h"
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
@@ -33,7 +34,6 @@ struct i915_audio_component;
struct i915_hdcp_arbiter;
struct intel_atomic_state;
struct intel_audio_funcs;
-struct intel_bios_encoder_data;
struct intel_cdclk_funcs;
struct intel_cdclk_vals;
struct intel_color_funcs;
@@ -218,7 +218,6 @@ struct intel_vbt_data {
struct list_head display_devices;
struct list_head bdb_blocks;
- struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
@@ -314,6 +313,8 @@ struct intel_display {
unsigned int deratedbw[I915_NUM_QGV_POINTS];
/* for each PSF GV point */
unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
+ /* Peak BW for each QGV point */
+ unsigned int peakbw[I915_NUM_QGV_POINTS];
u8 num_qgv_points;
u8 num_psf_gv_points;
u8 num_planes;
@@ -344,6 +345,15 @@ struct intel_display {
} dbuf;
struct {
+ wait_queue_head_t waitqueue;
+
+ /* mutex to protect pmdemand programming sequence */
+ struct mutex lock;
+
+ struct intel_global_obj obj;
+ } pmdemand;
+
+ struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -419,6 +429,14 @@ struct intel_display {
} hti;
struct {
+ /* Access with DISPLAY_INFO() */
+ const struct intel_display_device_info *__device_info;
+
+ /* Access with DISPLAY_RUNTIME_INFO() */
+ struct intel_display_runtime_info __runtime_info;
+ } info;
+
+ struct {
bool false_color;
} ips;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 2a4df62692a6..63c1fb9e479f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -6,6 +6,7 @@
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
@@ -228,19 +229,18 @@ out:
seq_puts(m, "\n");
}
-static void intel_dp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
+static void intel_dp_info(struct seq_file *m, struct intel_connector *connector)
{
- struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
- const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
+ const struct edid *edid = drm_edid_raw(connector->detect_edid);
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n",
- str_yes_no(intel_connector->base.display_info.has_audio));
+ str_yes_no(connector->base.display_info.has_audio));
drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
- edid ? edid->data : NULL, &intel_dp->aux);
+ edid, &intel_dp->aux);
}
static void intel_dp_mst_info(struct seq_file *m,
@@ -819,8 +819,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
if (IS_ERR(input_buffer))
return PTR_ERR(input_buffer);
- drm_dbg(&to_i915(dev)->drm,
- "Copied %d bytes from user\n", (unsigned int)len);
+ drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
@@ -839,8 +838,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
- drm_dbg(&to_i915(dev)->drm,
- "Got %d for test active\n", val);
+ drm_dbg(dev, "Got %d for test active\n", val);
/* To prevent erroneous activation of the compliance
* testing code, only accept an actual value of 1 here
*/
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 464df1764a86..c39f8a15d8aa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -9,14 +9,13 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display.h"
#include "intel_display_device.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
-
static const struct intel_display_device_info no_display = {};
#define PIPE_A_OFFSET 0x70000
@@ -185,10 +184,6 @@ static const struct intel_display_device_info no_display = {};
.__runtime_defaults.cpu_transcoder_mask = \
BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
-static const struct intel_display_device_info i830_display = {
- I830_DISPLAY,
-};
-
#define I845_DISPLAY \
.has_overlay = 1, \
.overlay_needs_physical = 1, \
@@ -201,19 +196,29 @@ static const struct intel_display_device_info i830_display = {
.__runtime_defaults.pipe_mask = BIT(PIPE_A), \
.__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A)
+static const struct intel_display_device_info i830_display = {
+ I830_DISPLAY,
+
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C), /* DVO A/B/C */
+};
+
static const struct intel_display_device_info i845_display = {
I845_DISPLAY,
+
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */
};
static const struct intel_display_device_info i85x_display = {
I830_DISPLAY,
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
static const struct intel_display_device_info i865g_display = {
I845_DISPLAY,
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* DVO B/C */
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -222,21 +227,23 @@ static const struct intel_display_device_info i865g_display = {
.has_overlay = 1, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
- I9XX_COLORS, \
\
.__runtime_defaults.ip.ver = 3, \
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.__runtime_defaults.cpu_transcoder_mask = \
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */
static const struct intel_display_device_info i915g_display = {
GEN3_DISPLAY,
+ I845_COLORS,
.cursor_needs_physical = 1,
.overlay_needs_physical = 1,
};
static const struct intel_display_device_info i915gm_display = {
GEN3_DISPLAY,
+ I9XX_COLORS,
.cursor_needs_physical = 1,
.overlay_needs_physical = 1,
.supports_tv = 1,
@@ -246,6 +253,7 @@ static const struct intel_display_device_info i915gm_display = {
static const struct intel_display_device_info i945g_display = {
GEN3_DISPLAY,
+ I845_COLORS,
.has_hotplug = 1,
.cursor_needs_physical = 1,
.overlay_needs_physical = 1,
@@ -253,6 +261,7 @@ static const struct intel_display_device_info i945g_display = {
static const struct intel_display_device_info i945gm_display = {
GEN3_DISPLAY,
+ I9XX_COLORS,
.has_hotplug = 1,
.cursor_needs_physical = 1,
.overlay_needs_physical = 1,
@@ -263,6 +272,13 @@ static const struct intel_display_device_info i945gm_display = {
static const struct intel_display_device_info g33_display = {
GEN3_DISPLAY,
+ I845_COLORS,
+ .has_hotplug = 1,
+};
+
+static const struct intel_display_device_info pnv_display = {
+ GEN3_DISPLAY,
+ I9XX_COLORS,
.has_hotplug = 1,
};
@@ -281,6 +297,8 @@ static const struct intel_display_device_info g33_display = {
static const struct intel_display_device_info i965g_display = {
GEN4_DISPLAY,
.has_overlay = 1,
+
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */
};
static const struct intel_display_device_info i965gm_display = {
@@ -288,17 +306,21 @@ static const struct intel_display_device_info i965gm_display = {
.has_overlay = 1,
.supports_tv = 1,
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* SDVO B/C */
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
static const struct intel_display_device_info g45_display = {
GEN4_DISPLAY,
+
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */
};
static const struct intel_display_device_info gm45_display = {
GEN4_DISPLAY,
.supports_tv = 1,
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* SDVO/HDMI/DP B/C, DP D */
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -311,7 +333,8 @@ static const struct intel_display_device_info gm45_display = {
.__runtime_defaults.ip.ver = 5, \
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.__runtime_defaults.cpu_transcoder_mask = \
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
static const struct intel_display_device_info ilk_d_display = {
ILK_DISPLAY,
@@ -333,6 +356,7 @@ static const struct intel_display_device_info snb_display = {
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -346,6 +370,7 @@ static const struct intel_display_device_info ivb_display = {
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -361,6 +386,7 @@ static const struct intel_display_device_info vlv_display = {
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C), /* HDMI/DP B/C */
};
static const struct intel_display_device_info hsw_display = {
@@ -368,6 +394,8 @@ static const struct intel_display_device_info hsw_display = {
.has_dp_mst = 1,
.has_fpga_dbg = 1,
.has_hotplug = 1,
+ .has_psr = 1,
+ .has_psr_hw_tracking = 1,
HSW_PIPE_OFFSETS,
IVB_CURSOR_OFFSETS,
IVB_COLORS,
@@ -377,6 +405,7 @@ static const struct intel_display_device_info hsw_display = {
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -385,6 +414,8 @@ static const struct intel_display_device_info bdw_display = {
.has_dp_mst = 1,
.has_fpga_dbg = 1,
.has_hotplug = 1,
+ .has_psr = 1,
+ .has_psr_hw_tracking = 1,
HSW_PIPE_OFFSETS,
IVB_CURSOR_OFFSETS,
IVB_COLORS,
@@ -394,6 +425,7 @@ static const struct intel_display_device_info bdw_display = {
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -409,6 +441,7 @@ static const struct intel_display_device_info chv_display = {
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* HDMI/DP B/C/D */
};
static const struct intel_display_device_info skl_display = {
@@ -432,6 +465,7 @@ static const struct intel_display_device_info skl_display = {
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
.__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
@@ -455,7 +489,8 @@ static const struct intel_display_device_info skl_display = {
.__runtime_defaults.cpu_transcoder_mask = \
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
- BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C)
+ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C)
static const struct intel_display_device_info bxt_display = {
GEN9_LP_DISPLAY,
@@ -472,46 +507,57 @@ static const struct intel_display_device_info glk_display = {
.__runtime_defaults.ip.ver = 10,
};
-static const struct intel_display_device_info gen11_display = {
- .abox_mask = BIT(0),
- .dbuf.size = 2048,
- .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2),
- .has_ddi = 1,
- .has_dp_mst = 1,
- .has_fpga_dbg = 1,
- .has_hotplug = 1,
- .has_ipc = 1,
- .has_psr = 1,
- .has_psr_hw_tracking = 1,
- .pipe_offsets = {
- [TRANSCODER_A] = PIPE_A_OFFSET,
- [TRANSCODER_B] = PIPE_B_OFFSET,
- [TRANSCODER_C] = PIPE_C_OFFSET,
- [TRANSCODER_EDP] = PIPE_EDP_OFFSET,
- [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET,
- [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET,
- },
- .trans_offsets = {
- [TRANSCODER_A] = TRANSCODER_A_OFFSET,
- [TRANSCODER_B] = TRANSCODER_B_OFFSET,
- [TRANSCODER_C] = TRANSCODER_C_OFFSET,
- [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET,
- [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET,
- [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET,
- },
- IVB_CURSOR_OFFSETS,
- ICL_COLORS,
+#define ICL_DISPLAY \
+ .abox_mask = BIT(0), \
+ .dbuf.size = 2048, \
+ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
+ .has_ddi = 1, \
+ .has_dp_mst = 1, \
+ .has_fpga_dbg = 1, \
+ .has_hotplug = 1, \
+ .has_ipc = 1, \
+ .has_psr = 1, \
+ .has_psr_hw_tracking = 1, \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }, \
+ IVB_CURSOR_OFFSETS, \
+ ICL_COLORS, \
+ \
+ .__runtime_defaults.ip.ver = 11, \
+ .__runtime_defaults.has_dmc = 1, \
+ .__runtime_defaults.has_dsc = 1, \
+ .__runtime_defaults.has_hdcp = 1, \
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime_defaults.cpu_transcoder_mask = \
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A)
- .__runtime_defaults.ip.ver = 11,
- .__runtime_defaults.has_dmc = 1,
- .__runtime_defaults.has_dsc = 1,
- .__runtime_defaults.has_hdcp = 1,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) |
- BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+static const struct intel_display_device_info icl_display = {
+ ICL_DISPLAY,
+
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) | BIT(PORT_E),
+};
+
+static const struct intel_display_device_info jsl_ehl_display = {
+ ICL_DISPLAY,
+
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D),
};
#define XE_D_DISPLAY \
@@ -559,6 +605,20 @@ static const struct intel_display_device_info gen11_display = {
static const struct intel_display_device_info tgl_display = {
XE_D_DISPLAY,
+
+ /*
+ * FIXME DDI C/combo PHY C missing due to combo PHY
+ * code making a mess on SKUs where the PHY is missing.
+ */
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+ BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4) | BIT(PORT_TC5) | BIT(PORT_TC6),
+};
+
+static const struct intel_display_device_info dg1_display = {
+ XE_D_DISPLAY,
+
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+ BIT(PORT_TC1) | BIT(PORT_TC2),
};
static const struct intel_display_device_info rkl_display = {
@@ -570,12 +630,17 @@ static const struct intel_display_device_info rkl_display = {
.__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+ BIT(PORT_TC1) | BIT(PORT_TC2),
};
static const struct intel_display_device_info adl_s_display = {
XE_D_DISPLAY,
.has_hti = 1,
.has_psr_hw_tracking = 0,
+
+ .__runtime_defaults.port_mask = BIT(PORT_A) |
+ BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
#define XE_LPD_FEATURES \
@@ -630,6 +695,8 @@ static const struct intel_display_device_info xe_lpd_display = {
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+ BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
static const struct intel_display_device_info xe_hpd_display = {
@@ -639,6 +706,8 @@ static const struct intel_display_device_info xe_hpd_display = {
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D_XELPD) |
+ BIT(PORT_TC1),
};
static const struct intel_display_device_info xe_lpdp_display = {
@@ -651,14 +720,28 @@ static const struct intel_display_device_info xe_lpdp_display = {
.__runtime_defaults.cpu_transcoder_mask =
BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) |
+ BIT(PORT_TC1) | BIT(PORT_TC2) | BIT(PORT_TC3) | BIT(PORT_TC4),
};
-__diag_pop();
+/*
+ * Separate detection for no display cases to keep the display id array simple.
+ *
+ * IVB Q requires subvendor and subdevice matching to differentiate from IVB D
+ * GT2 server.
+ */
+static bool has_no_display(struct pci_dev *pdev)
+{
+ static const struct pci_device_id ids[] = {
+ INTEL_IVB_Q_IDS(0),
+ {}
+ };
+
+ return pci_match_id(ids, pdev);
+}
#undef INTEL_VGA_DEVICE
-#undef INTEL_QUANTA_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) { id, info }
-#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info }
static const struct {
u32 devid;
@@ -677,13 +760,12 @@ static const struct {
INTEL_I965GM_IDS(&i965gm_display),
INTEL_GM45_IDS(&gm45_display),
INTEL_G45_IDS(&g45_display),
- INTEL_PINEVIEW_G_IDS(&g33_display),
- INTEL_PINEVIEW_M_IDS(&g33_display),
+ INTEL_PINEVIEW_G_IDS(&pnv_display),
+ INTEL_PINEVIEW_M_IDS(&pnv_display),
INTEL_IRONLAKE_D_IDS(&ilk_d_display),
INTEL_IRONLAKE_M_IDS(&ilk_m_display),
INTEL_SNB_D_IDS(&snb_display),
INTEL_SNB_M_IDS(&snb_display),
- INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */
INTEL_IVB_M_IDS(&ivb_display),
INTEL_IVB_D_IDS(&ivb_display),
INTEL_HSW_IDS(&hsw_display),
@@ -695,11 +777,11 @@ static const struct {
INTEL_GLK_IDS(&glk_display),
INTEL_KBL_IDS(&skl_display),
INTEL_CFL_IDS(&skl_display),
- INTEL_ICL_11_IDS(&gen11_display),
- INTEL_EHL_IDS(&gen11_display),
- INTEL_JSL_IDS(&gen11_display),
+ INTEL_ICL_11_IDS(&icl_display),
+ INTEL_EHL_IDS(&jsl_ehl_display),
+ INTEL_JSL_IDS(&jsl_ehl_display),
INTEL_TGL_12_IDS(&tgl_display),
- INTEL_DG1_IDS(&tgl_display),
+ INTEL_DG1_IDS(&dg1_display),
INTEL_RKL_IDS(&rkl_display),
INTEL_ADLS_IDS(&adl_s_display),
INTEL_RPLS_IDS(&adl_s_display),
@@ -731,6 +813,15 @@ probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step
u32 val;
int i;
+ /* The caller expects ver, rel and step to be initialized
+ * here, and there's no good way to check whether there was a
+ * failure and no_display was returned. So initialize all these
+ * values to zero here, to be sure.
+ */
+ *ver = 0;
+ *rel = 0;
+ *step = 0;
+
addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32));
if (!addr) {
drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n");
@@ -740,9 +831,10 @@ probe_gmdid_display(struct drm_i915_private *i915, u16 *ver, u16 *rel, u16 *step
val = ioread32(addr);
pci_iounmap(pdev, addr);
- if (val == 0)
- /* Platform doesn't have display */
+ if (val == 0) {
+ drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
return &no_display;
+ }
*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
*rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
@@ -768,6 +860,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
if (has_gmdid)
return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step);
+ if (has_no_display(pdev)) {
+ drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ return &no_display;
+ }
+
for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) {
if (intel_display_ids[i].devid == pdev->device)
return intel_display_ids[i].info;
@@ -778,3 +875,153 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
return &no_display;
}
+
+void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+{
+ struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915);
+ enum pipe pipe;
+
+ BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES);
+ BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->cpu_transcoder_mask) < I915_MAX_TRANSCODERS);
+ BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS);
+
+ /* Wa_14011765242: adl-s A0,A1 */
+ if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
+ for_each_pipe(i915, pipe)
+ display_runtime->num_scalers[pipe] = 0;
+ else if (DISPLAY_VER(i915) >= 11) {
+ for_each_pipe(i915, pipe)
+ display_runtime->num_scalers[pipe] = 2;
+ } else if (DISPLAY_VER(i915) >= 9) {
+ display_runtime->num_scalers[PIPE_A] = 2;
+ display_runtime->num_scalers[PIPE_B] = 2;
+ display_runtime->num_scalers[PIPE_C] = 1;
+ }
+
+ if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
+ for_each_pipe(i915, pipe)
+ display_runtime->num_sprites[pipe] = 4;
+ else if (DISPLAY_VER(i915) >= 11)
+ for_each_pipe(i915, pipe)
+ display_runtime->num_sprites[pipe] = 6;
+ else if (DISPLAY_VER(i915) == 10)
+ for_each_pipe(i915, pipe)
+ display_runtime->num_sprites[pipe] = 3;
+ else if (IS_BROXTON(i915)) {
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
+
+ display_runtime->num_sprites[PIPE_A] = 2;
+ display_runtime->num_sprites[PIPE_B] = 2;
+ display_runtime->num_sprites[PIPE_C] = 1;
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ for_each_pipe(i915, pipe)
+ display_runtime->num_sprites[pipe] = 2;
+ } else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) {
+ for_each_pipe(i915, pipe)
+ display_runtime->num_sprites[pipe] = 1;
+ }
+
+ if ((IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) &&
+ !(intel_de_read(i915, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(&i915->drm, "Display not present, disabling\n");
+ goto display_fused_off;
+ }
+
+ if (IS_GRAPHICS_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
+ u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
+
+ /*
+ * SFUSE_STRAP is supposed to have a bit signalling the display
+ * is fused off. Unfortunately it seems that, at least in
+ * certain cases, fused off display means that PCH display
+ * reads don't land anywhere. In that case, we read 0s.
+ *
+ * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+ * should be set when taking over after the firmware.
+ */
+ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+ sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+ (HAS_PCH_CPT(i915) &&
+ !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+ drm_info(&i915->drm,
+ "Display fused off, disabling\n");
+ goto display_fused_off;
+ } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+ drm_info(&i915->drm, "PipeC fused off\n");
+ display_runtime->pipe_mask &= ~BIT(PIPE_C);
+ display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ }
+ } else if (DISPLAY_VER(i915) >= 9) {
+ u32 dfsm = intel_de_read(i915, SKL_DFSM);
+
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
+ display_runtime->pipe_mask &= ~BIT(PIPE_A);
+ display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ display_runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
+ }
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
+ display_runtime->pipe_mask &= ~BIT(PIPE_B);
+ display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ }
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
+ display_runtime->pipe_mask &= ~BIT(PIPE_C);
+ display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ }
+
+ if (DISPLAY_VER(i915) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
+ display_runtime->pipe_mask &= ~BIT(PIPE_D);
+ display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ }
+
+ if (!display_runtime->pipe_mask)
+ goto display_fused_off;
+
+ if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
+ display_runtime->has_hdcp = 0;
+
+ if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+ display_runtime->fbc_mask = 0;
+
+ if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ display_runtime->has_dmc = 0;
+
+ if (IS_DISPLAY_VER(i915, 10, 12) &&
+ (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
+ display_runtime->has_dsc = 0;
+ }
+
+ return;
+
+display_fused_off:
+ memset(display_runtime, 0, sizeof(*display_runtime));
+}
+
+void intel_display_device_info_print(const struct intel_display_device_info *info,
+ const struct intel_display_runtime_info *runtime,
+ struct drm_printer *p)
+{
+ if (runtime->ip.rel)
+ drm_printf(p, "display version: %u.%02u\n",
+ runtime->ip.ver,
+ runtime->ip.rel);
+ else
+ drm_printf(p, "display version: %u\n",
+ runtime->ip.ver);
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
+
+ drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
+ drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
+ drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 2aa82cbdf1c5..215e682bd8b7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -8,9 +8,10 @@
#include <linux/types.h>
-#include "display/intel_display_limits.h"
+#include "intel_display_limits.h"
struct drm_i915_private;
+struct drm_printer;
#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
/* Keep in alphabetical order */ \
@@ -53,7 +54,7 @@ struct drm_i915_private;
#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
#define HAS_IPC(i915) (DISPLAY_INFO(i915)->has_ipc)
-#define HAS_IPS(i915) (IS_HSW_ULT(i915) || IS_BROADWELL(i915))
+#define HAS_IPS(i915) (IS_HASWELL_ULT(i915) || IS_BROADWELL(i915))
#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10))
#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
@@ -79,6 +80,7 @@ struct intel_display_runtime_info {
u8 pipe_mask;
u8 cpu_transcoder_mask;
+ u16 port_mask;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
@@ -124,5 +126,10 @@ struct intel_display_device_info {
const struct intel_display_device_info *
intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
u16 *ver, u16 *rel, u16 *step);
+void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
+
+void intel_display_device_info_print(const struct intel_display_device_info *info,
+ const struct intel_display_runtime_info *runtime,
+ struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 60ce10fc7205..8f144d4d3c39 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -28,6 +28,7 @@
#include "intel_crtc.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
+#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
@@ -47,6 +48,7 @@
#include "intel_opregion.h"
#include "intel_overlay.h"
#include "intel_plane_initial.h"
+#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_vga.h"
@@ -176,6 +178,7 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
intel_color_init_hooks(i915);
intel_init_cdclk_hooks(i915);
@@ -211,6 +214,8 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret < 0)
goto cleanup_vga;
+ intel_pmdemand_init_early(i915);
+
intel_power_domains_init_hw(i915, false);
if (!HAS_DISPLAY(i915))
@@ -240,6 +245,10 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
+ ret = intel_pmdemand_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
init_llist_head(&i915->display.atomic_helper.free_list);
INIT_WORK(&i915->display.atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
@@ -435,7 +444,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_unregister_dsm_handler();
/* flush any delayed tasks or pending work */
- flush_scheduled_work();
+ flush_workqueue(i915->unordered_wq);
intel_hdcp_component_fini(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 3b2a287d2041..62ce55475554 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -18,6 +18,7 @@
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
+#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@@ -748,6 +749,20 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
+ if (de_iir & DE_EDP_PSR_INT_HSW) {
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 psr_iir;
+
+ psr_iir = intel_uncore_rmw(&dev_priv->uncore,
+ EDP_PSR_IIR, 0, 0);
+ intel_psr_irq_handler(intel_dp, psr_iir);
+ break;
+ }
+ }
+
if (de_iir & DE_AUX_CHANNEL_A_IVB)
intel_dp_aux_irq_handler(dev_priv);
@@ -827,12 +842,27 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
+static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+{
+ wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+}
+
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
bool found = false;
- if (iir & GEN8_DE_MISC_GSE) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ if (iir & (XELPDP_PMDEMAND_RSP |
+ XELPDP_PMDEMAND_RSPTOUT_ERR)) {
+ if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
+ drm_dbg(&dev_priv->drm,
+ "Error waiting for Punit PM Demand Response\n");
+
+ intel_pmdemand_irq_handler(dev_priv);
+ found = true;
+ }
+ } else if (iir & GEN8_DE_MISC_GSE) {
intel_opregion_asle_intr(dev_priv);
found = true;
}
@@ -873,7 +903,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
- u32 val, tmp;
+ u32 val;
/*
* Incase of dual link, TE comes from DSI_1
@@ -920,7 +950,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
- tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
+ intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
@@ -1119,7 +1149,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1140,7 +1170,7 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = intel_uncore_regs(&i915->uncore);
const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
disable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1507,7 +1537,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask;
@@ -1553,6 +1583,50 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
vlv_display_irq_reset(dev_priv);
}
+void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 display_mask, extra_mask;
+
+ if (GRAPHICS_VER(i915) >= 7) {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
+ extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
+ DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
+ DE_DP_A_HOTPLUG_IVB);
+ } else {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
+ DE_PIPEA_CRC_DONE | DE_POISON);
+ extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+ DE_PLANE_FLIP_DONE(PLANE_A) |
+ DE_PLANE_FLIP_DONE(PLANE_B) |
+ DE_DP_A_HOTPLUG);
+ }
+
+ if (IS_HASWELL(i915)) {
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ display_mask |= DE_EDP_PSR_INT_HSW;
+ }
+
+ if (IS_IRONLAKE_M(i915))
+ extra_mask |= DE_PCU_EVENT;
+
+ i915->irq_mask = ~display_mask;
+
+ ibx_irq_postinstall(i915);
+
+ GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
+ display_mask | extra_mask);
+}
+
+static void mtp_irq_postinstall(struct drm_i915_private *i915);
+static void icp_irq_postinstall(struct drm_i915_private *i915);
+
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1570,13 +1644,23 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
+ if (DISPLAY_VER(dev_priv) >= 14)
+ mtp_irq_postinstall(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_postinstall(dev_priv);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ ibx_irq_postinstall(dev_priv);
+
if (DISPLAY_VER(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
+ XELPDP_PMDEMAND_RSP;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(dev_priv, &port))
@@ -1633,7 +1717,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
struct intel_uncore *uncore = &i915->uncore;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
@@ -1647,7 +1731,7 @@ void mtp_irq_postinstall(struct drm_i915_private *i915)
GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
}
-void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask = SDE_GMBUS_ICP;
@@ -1666,3 +1750,30 @@ void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN11_DISPLAY_IRQ_ENABLE);
}
+void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ gen8_de_irq_postinstall(i915);
+ intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
+ GEN11_DISPLAY_IRQ_ENABLE);
+}
+
+void intel_display_irq_init(struct drm_i915_private *i915)
+{
+ i915->drm.vblank_disable_immediate = true;
+
+ /*
+ * Most platforms treat the display irq block as an always-on power
+ * domain. vlv/chv can disable it at runtime and need special care to
+ * avoid writing any of the display block registers outside of the power
+ * domain. We defer setting up the display irqs in this case to the
+ * runtime pm.
+ */
+ i915->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display_irqs_enabled = false;
+
+ intel_hotplug_irq_init(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index 874893f4f16d..2a090dd6abd7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -58,12 +58,11 @@ void vlv_display_irq_reset(struct drm_i915_private *i915);
void gen8_display_irq_reset(struct drm_i915_private *i915);
void gen11_display_irq_reset(struct drm_i915_private *i915);
-void ibx_irq_postinstall(struct drm_i915_private *i915);
void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void icp_irq_postinstall(struct drm_i915_private *i915);
+void ilk_de_irq_postinstall(struct drm_i915_private *i915);
void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void mtp_irq_postinstall(struct drm_i915_private *i915);
void gen11_de_irq_postinstall(struct drm_i915_private *i915);
+void dg1_de_irq_postinstall(struct drm_i915_private *i915);
u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe);
void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
@@ -78,4 +77,6 @@ void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_
void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void intel_display_irq_init(struct drm_i915_private *i915);
+
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2f4f00ae2f57..9e01054c2430 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
+#include "intel_clock_gating.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
@@ -20,6 +21,7 @@
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
+#include "intel_pmdemand.h"
#include "intel_pps_regs.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
@@ -456,6 +458,17 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains,
clear_bit(domain, power_domains->async_put_domains[1].bits);
}
+static void
+cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
+{
+ if (sync)
+ cancel_delayed_work_sync(&power_domains->async_put_work);
+ else
+ cancel_delayed_work(&power_domains->async_put_work);
+
+ power_domains->async_put_next_delay = 0;
+}
+
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
@@ -476,7 +489,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
goto out_verify;
- cancel_delayed_work(&power_domains->async_put_work);
+ cancel_async_put_work(power_domains, false);
intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
@@ -607,7 +620,8 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv,
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
- intel_wakeref_t wakeref)
+ intel_wakeref_t wakeref,
+ int delay_ms)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
@@ -616,7 +630,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
power_domains->async_put_wakeref = wakeref;
drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
&power_domains->async_put_work,
- msecs_to_jiffies(100)));
+ msecs_to_jiffies(delay_ms)));
}
static void
@@ -679,13 +693,15 @@ intel_display_power_put_async_work(struct work_struct *work)
bitmap_zero(power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM);
queue_async_put_domains_work(power_domains,
- fetch_and_zero(&new_work_wakeref));
+ fetch_and_zero(&new_work_wakeref),
+ power_domains->async_put_next_delay);
+ power_domains->async_put_next_delay = 0;
} else {
/*
* Cancel the work that got queued after this one got dequeued,
* since here we released the corresponding async-put reference.
*/
- cancel_delayed_work(&power_domains->async_put_work);
+ cancel_async_put_work(power_domains, false);
}
out_verify:
@@ -704,19 +720,25 @@ out_verify:
* @i915: i915 device instance
* @domain: power domain to reference
* @wakeref: wakeref acquired for the reference that is being released
+ * @delay_ms: delay in ms before powering down the power domain
*
* This function drops the power domain reference obtained by
* intel_display_power_get*() and schedules a work to power down the
* corresponding hardware block if this is the last reference.
+ * The power down is delayed by @delay_ms if this is >= 0, or by a default
+ * 100 ms otherwise.
*/
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
+ intel_wakeref_t wakeref,
+ int delay_ms)
{
struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ delay_ms = delay_ms >= 0 ? delay_ms : 100;
+
mutex_lock(&power_domains->lock);
if (power_domains->domain_use_count[domain] > 1) {
@@ -730,10 +752,13 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
set_bit(domain, power_domains->async_put_domains[1].bits);
+ power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
+ delay_ms);
} else {
set_bit(domain, power_domains->async_put_domains[0].bits);
queue_async_put_domains_work(power_domains,
- fetch_and_zero(&work_wakeref));
+ fetch_and_zero(&work_wakeref),
+ delay_ms);
}
out_verify:
@@ -773,7 +798,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
async_put_domains_mask(power_domains, &async_put_mask);
release_async_put_domains(power_domains, &async_put_mask);
- cancel_delayed_work(&power_domains->async_put_work);
+ cancel_async_put_work(power_domains, false);
out_verify:
verify_async_put_domains_state(power_domains);
@@ -797,7 +822,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_display_power_flush_work(i915);
- cancel_delayed_work_sync(&power_domains->async_put_work);
+ cancel_async_put_work(power_domains, true);
verify_async_put_domains_state(power_domains);
@@ -1082,20 +1107,29 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
+ u8 slices_mask;
+
dev_priv->display.dbuf.enabled_slices =
intel_enabled_dbuf_slices_mask(dev_priv);
+ slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_pmdemand_program_dbuf(dev_priv, slices_mask);
+
/*
* Just power up at least 1 slice, we will
* figure out later which slices we have and what we need.
*/
- gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
- dev_priv->display.dbuf.enabled_slices);
+ gen9_dbuf_slices_update(dev_priv, slices_mask);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
gen9_dbuf_slices_update(dev_priv, 0);
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_pmdemand_program_dbuf(dev_priv, 0);
}
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
@@ -1375,9 +1409,8 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
- if (HAS_PCH_LPT_LP(dev_priv))
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
- 0, PCH_LP_PARTITION_LEVEL_DISABLE);
+ /* Many display registers don't survive PC8+ */
+ intel_clock_gating_init(dev_priv);
}
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
@@ -1576,7 +1609,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
return;
if (IS_ALDERLAKE_S(dev_priv) ||
- IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index be1a87bde0c9..d3b5d04b7b07 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -6,16 +6,17 @@
#ifndef __INTEL_DISPLAY_POWER_H__
#define __INTEL_DISPLAY_POWER_H__
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "intel_wakeref.h"
enum aux_ch;
-enum dpio_channel;
-enum dpio_phy;
-enum i915_drm_suspend_mode;
enum port;
struct drm_i915_private;
struct i915_power_well;
struct intel_encoder;
+struct seq_file;
/*
* Keep the pipe, transcoder, port (DDI_LANES,DDI_IO,AUX) domain instances
@@ -150,6 +151,7 @@ struct i915_power_domains {
struct delayed_work async_put_work;
intel_wakeref_t async_put_wakeref;
struct intel_power_domain_mask async_put_domains[2];
+ int async_put_next_delay;
struct i915_power_well *power_wells;
};
@@ -196,7 +198,8 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
- intel_wakeref_t wakeref);
+ intel_wakeref_t wakeref,
+ int delay_ms);
void intel_display_power_flush_work(struct drm_i915_private *i915);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct drm_i915_private *dev_priv,
@@ -207,7 +210,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, wakeref);
+ __intel_display_power_put_async(i915, domain, wakeref, -1);
+}
+
+static inline void
+intel_display_power_put_async_delay(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref,
+ int delay_ms)
+{
+ __intel_display_power_put_async(i915, domain, wakeref, delay_ms);
}
#else
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
@@ -226,7 +238,16 @@ intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, -1);
+ __intel_display_power_put_async(i915, domain, -1, -1);
+}
+
+static inline void
+intel_display_power_put_async_delay(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref,
+ int delay_ms)
+{
+ __intel_display_power_put_async(i915, domain, -1, delay_ms);
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 1118ee9d224c..5ad04cd42c15 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1252,10 +1252,18 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
POWER_DOMAIN_INIT);
#define XELPD_DC_OFF_PORT_POWER_DOMAINS \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
POWER_DOMAIN_PORT_DDI_LANES_TC1, \
POWER_DOMAIN_PORT_DDI_LANES_TC2, \
POWER_DOMAIN_PORT_DDI_LANES_TC3, \
POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_IO_C, \
+ POWER_DOMAIN_AUX_IO_D, \
+ POWER_DOMAIN_AUX_IO_E, \
POWER_DOMAIN_AUX_C, \
POWER_DOMAIN_AUX_D, \
POWER_DOMAIN_AUX_E, \
@@ -1272,14 +1280,6 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
XELPD_PW_B_POWER_DOMAINS, \
XELPD_PW_C_POWER_DOMAINS, \
XELPD_PW_D_POWER_DOMAINS, \
- POWER_DOMAIN_PORT_DDI_LANES_C, \
- POWER_DOMAIN_PORT_DDI_LANES_D, \
- POWER_DOMAIN_PORT_DDI_LANES_E, \
- POWER_DOMAIN_VGA, \
- POWER_DOMAIN_AUDIO_PLAYBACK, \
- POWER_DOMAIN_AUX_IO_C, \
- POWER_DOMAIN_AUX_IO_D, \
- POWER_DOMAIN_AUX_IO_E, \
XELPD_DC_OFF_PORT_POWER_DOMAINS
I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index e494df379e6c..a8736588314d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -11,7 +11,8 @@
#include "intel_dpio_phy.h"
struct drm_i915_private;
-struct i915_power_well;
+struct i915_power_well_ops;
+struct intel_encoder;
#define for_each_power_well(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 8a88de67ff0a..5f479f3828bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1057,7 +1057,7 @@ void intel_dmc_init(struct drm_i915_private *i915)
i915->display.dmc.dmc = dmc;
drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dmc->work);
+ queue_work(i915->unordered_wq, &dmc->work);
return;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f4192fda1a76..12bd2f322e62 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -500,7 +500,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
max_rate = 810000;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -510,7 +510,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
} else if (DISPLAY_VER(dev_priv) == 9) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
IS_BROADWELL(dev_priv)) {
source_rates = hsw_rates;
size = ARRAY_SIZE(hsw_rates);
@@ -713,9 +713,18 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
/*
* According to BSpec, 27 is the max DSC output bpp,
- * 8 is the min DSC output bpp
+ * 8 is the min DSC output bpp.
+ * While we can still clamp higher bpp values to 27, saving bandwidth,
+ * if compression down to bpp < 8 is required, we can't do that, which
+ * probably means we can't fit the required mode even with DSC
+ * enabled.
*/
- bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27);
+ if (bits_per_pixel < 8) {
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
+ bits_per_pixel);
+ return 0;
+ }
+ bits_per_pixel = min_t(u32, bits_per_pixel, 27);
} else {
/* Find the nearest match in the array of known BPPs from VESA */
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
@@ -4069,9 +4078,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
bool handled = false;
- drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
- if (handled)
- ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+ drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
if (esi[1] & DP_CP_IRQ) {
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
@@ -4146,6 +4153,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+
+ if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
+ drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
}
return link_ok;
@@ -5251,7 +5261,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
spin_lock_irq(&i915->irq_lock);
i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
spin_unlock_irq(&i915->irq_lock);
- queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
+ queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 197c6e81db14..2d173bd495a3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -14,7 +14,7 @@
#include "intel_pps.h"
#include "intel_tc.h"
-static u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
+u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
int i;
u32 v = 0;
@@ -792,25 +792,60 @@ static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
return (enum aux_ch)encoder->port;
}
+static struct intel_encoder *
+get_encoder_by_aux_ch(struct intel_encoder *encoder,
+ enum aux_ch aux_ch)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_encoder *other;
+
+ for_each_intel_encoder(&i915->drm, other) {
+ if (other == encoder)
+ continue;
+
+ if (!intel_encoder_is_dig_port(other))
+ continue;
+
+ if (enc_to_dig_port(other)->aux_ch == aux_ch)
+ return other;
+ }
+
+ return NULL;
+}
+
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_encoder *other;
+ const char *source;
enum aux_ch aux_ch;
aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
- if (aux_ch != AUX_CH_NONE) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n",
- encoder->base.base.id, encoder->base.name,
- aux_ch_name(aux_ch));
- return aux_ch;
+ source = "VBT";
+
+ if (aux_ch == AUX_CH_NONE) {
+ aux_ch = default_aux_ch(encoder);
+ source = "platform default";
}
- aux_ch = default_aux_ch(encoder);
+ if (aux_ch == AUX_CH_NONE)
+ return AUX_CH_NONE;
+
+ /* FIXME validate aux_ch against platform caps */
+
+ other = get_encoder_by_aux_ch(encoder, aux_ch);
+ if (other) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] AUX CH %c already claimed by [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name, aux_ch_name(aux_ch),
+ other->base.base.id, other->base.name);
+ return AUX_CH_NONE;
+ }
drm_dbg_kms(&i915->drm,
- "[ENCODER:%d:%s] using AUX %c (platform default)\n",
+ "[ENCODER:%d:%s] Using AUX CH %c (%s)\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(aux_ch));
+ aux_ch_name(aux_ch), source);
return aux_ch;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 5b608f9d3499..8447f3e601fe 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_DP_AUX_H__
#define __INTEL_DP_AUX_H__
+#include <linux/types.h>
+
enum aux_ch;
struct drm_i915_private;
struct intel_dp;
@@ -17,5 +19,6 @@ void intel_dp_aux_init(struct intel_dp *intel_dp);
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
+u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 0952a707358c..a263773f4d68 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1064,6 +1064,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1081,7 +1082,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
}
/* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
+ queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
}
/* Perform the link training on all LTTPRs and the DPRX on a link. */
@@ -1279,7 +1280,7 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
drm_dp_128b132b_cds_interlane_align_done(link_status) &&
drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
- lt_err(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
+ lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
break;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 824be7f03724..999badfe2906 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1793,13 +1793,11 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
- u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ u32 bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
int vco;
- bestn = crtc_state->dpll.n;
bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
- bestm1 = crtc_state->dpll.m1;
bestm2 = crtc_state->dpll.m2 >> 22;
bestp1 = crtc_state->dpll.p1;
bestp2 = crtc_state->dpll.p2;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 6b2d8a1e2aa9..a9b19e80bff7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -191,7 +191,8 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
{
if (IS_DG1(i915))
return DG1_DPLL_ENABLE(pll->info->id);
- else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
+ else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ (pll->info->id == DPLL_ID_EHL_DPLL4))
return MG_PLL_ENABLE(0);
return ICL_DPLL_ENABLE(pll->info->id);
@@ -927,7 +928,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
switch (wrpll & WRPLL_REF_MASK) {
case WRPLL_REF_SPECIAL_HSW:
/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) {
refclk = dev_priv->display.dpll.ref_clks.nssc;
break;
}
@@ -2460,8 +2461,8 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
- IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
+ return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
}
@@ -3226,7 +3227,8 @@ static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
BIT(DPLL_ID_ICL_DPLL0);
- } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
+ } else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ port != PORT_A) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
@@ -3567,7 +3569,8 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
}
} else {
- if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ id == DPLL_ID_EHL_DPLL4) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
ICL_DPLL_CFGCR0(4));
hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3623,7 +3626,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
div0_reg = TGL_DPLL0_DIV0(id);
} else {
- if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ id == DPLL_ID_EHL_DPLL4) {
cfgcr0_reg = ICL_DPLL_CFGCR0(4);
cfgcr1_reg = ICL_DPLL_CFGCR1(4);
} else {
@@ -3781,7 +3785,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte
{
u32 val;
- if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
+ if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
pll->info->id != DPLL_ID_ICL_DPLL0)
return;
/*
@@ -3806,7 +3810,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
- if (IS_JSL_EHL(dev_priv) &&
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
/*
@@ -3914,7 +3918,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
- if (IS_JSL_EHL(dev_priv) &&
+ if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
pll->info->id == DPLL_ID_EHL_DPLL4)
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
pll->wakeref);
@@ -4150,7 +4154,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
dpll_mgr = &rkl_pll_mgr;
else if (DISPLAY_VER(dev_priv) >= 12)
dpll_mgr = &tgl_pll_mgr;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (DISPLAY_VER(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
@@ -4335,7 +4339,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
- if (IS_JSL_EHL(i915) && pll->on &&
+ if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ pll->on &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
pll->wakeref = intel_display_power_get(i915,
POWER_DOMAIN_DC_OFF);
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 7c5fddb203ba..fbfd8f959f17 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -166,6 +166,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
i915_vma_get(vma);
}
+ dpt->obj->mm.dirty = true;
+
atomic_dec(&i915->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -261,7 +263,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
dpt_obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
- dpt_obj = i915_gem_object_create_internal(i915, size);
+ dpt_obj = i915_gem_object_create_shmem(i915, size);
}
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 760e63cdc0c8..0d35b6be5b6a 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -111,7 +111,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
- mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}
static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5efdd471ac2b..d3cf6a652221 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -9,6 +9,26 @@
#include "intel_dsi.h"
#include "intel_panel.h"
+void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi)
+{
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time,
+ intel_dsi->panel_power_off_time);
+
+ if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay)
+ msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration);
+}
+
+void intel_dsi_shutdown(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_dsi_wait_panel_power_cycle(intel_dsi);
+}
+
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
{
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index ce80bd8be519..083390e5e442 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -173,5 +173,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
const struct mipi_dsi_host_ops *funcs,
enum port port);
+void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi);
+void intel_dsi_shutdown(struct intel_encoder *encoder);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index c7935ea498c4..e56ec3f2d84a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -235,7 +235,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
u32 delay = *((const u32 *) data);
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
usleep_range(delay, delay + 10);
data += 4;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 9884678743b6..b386894c3a6d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -509,6 +509,8 @@ void intel_dvo_init(struct drm_i915_private *i915)
return;
}
+ assert_port_valid(i915, intel_dvo->dev.port);
+
encoder->type = INTEL_OUTPUT_DVO;
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
encoder->port = intel_dvo->dev.port;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 0d27a98dcbbe..446bbf7986b6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1601,7 +1601,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
unsigned int width, height;
- unsigned int cpp, size;
+ unsigned int size;
u32 offset;
int x, y;
int ret;
@@ -1618,7 +1618,6 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
return -EINVAL;
}
- cpp = fb->base.format->cpp[i];
intel_fb_plane_dims(fb, i, &width, &height);
ret = convert_plane_offset_to_xy(fb, i, width, &x, &y);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 1966f9396201..25382022cd27 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -47,6 +47,7 @@
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
+#include "i915_vma.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
@@ -94,8 +95,7 @@ struct intel_fbc {
struct mutex lock;
unsigned int busy_bits;
- struct drm_mm_node compressed_fb;
- struct drm_mm_node compressed_llb;
+ struct i915_stolen_fb compressed_fb, compressed_llb;
enum intel_fbc_id id;
@@ -332,15 +332,16 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
- fbc->compressed_fb.start, U32_MAX));
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
- fbc->compressed_llb.start, U32_MAX));
-
+ GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ i915_gem_stolen_node_offset(&fbc->compressed_fb),
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ i915_gem_stolen_node_offset(&fbc->compressed_llb),
+ U32_MAX));
intel_de_write(i915, FBC_CFB_BASE,
- i915->dsm.stolen.start + fbc->compressed_fb.start);
+ i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
intel_de_write(i915, FBC_LL_BASE,
- i915->dsm.stolen.start + fbc->compressed_llb.start);
+ i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -447,7 +448,8 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(i915, DPFC_CB_BASE,
+ i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
static const struct intel_fbc_funcs g4x_fbc_funcs = {
@@ -498,7 +500,8 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
+ intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id),
+ i915_gem_stolen_node_offset(&fbc->compressed_fb));
}
static const struct intel_fbc_funcs ilk_fbc_funcs = {
@@ -605,7 +608,7 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
else if (DISPLAY_VER(i915) == 9)
skl_fbc_program_cfb_stride(fbc);
- if (to_gt(i915)->ggtt->num_fences)
+ if (intel_gt_support_legacy_fencing(to_gt(i915)))
snb_fbc_program_fence(fbc);
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
@@ -713,7 +716,7 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
(DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
- end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
+ end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -770,9 +773,9 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
int ret;
drm_WARN_ON(&i915->drm,
- drm_mm_node_allocated(&fbc->compressed_fb));
+ i915_gem_stolen_node_allocated(&fbc->compressed_fb));
drm_WARN_ON(&i915->drm,
- drm_mm_node_allocated(&fbc->compressed_llb));
+ i915_gem_stolen_node_allocated(&fbc->compressed_llb));
if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
@@ -792,15 +795,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_dbg_kms(&i915->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
- fbc->compressed_fb.size, fbc->limit);
-
+ i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
return 0;
err_llb:
- if (drm_mm_node_allocated(&fbc->compressed_llb))
+ if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
- if (drm_mm_initialized(&i915->mm.stolen))
+ if (i915_gem_stolen_initialized(i915))
drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -825,9 +827,9 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
- if (drm_mm_node_allocated(&fbc->compressed_llb))
+ if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
- if (drm_mm_node_allocated(&fbc->compressed_fb))
+ if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}
@@ -990,11 +992,10 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
- !plane_state->ggtt_vma->fence);
+ !intel_gt_support_legacy_fencing(to_gt(i915)));
- if (plane_state->flags & PLANE_HAS_FENCE &&
- plane_state->ggtt_vma->fence)
- fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
+ if (plane_state->flags & PLANE_HAS_FENCE)
+ fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
else
fbc_state->fence_id = -1;
@@ -1021,7 +1022,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
*/
return DISPLAY_VER(i915) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
- plane_state->ggtt_vma->fence);
+ i915_vma_fence_id(plane_state->ggtt_vma) != -1);
}
static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
@@ -1030,7 +1031,8 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
struct intel_fbc *fbc = plane->fbc;
return intel_fbc_min_limit(plane_state) <= fbc->limit &&
- intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
+ intel_fbc_cfb_size(plane_state) <= fbc->limit *
+ i915_gem_stolen_node_size(&fbc->compressed_fb);
}
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@@ -1054,6 +1056,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (!fbc)
return 0;
+ if (!i915_gem_stolen_initialized(i915)) {
+ plane_state->no_fbc_reason = "stolen memory not initialised";
+ return 0;
+ }
+
if (intel_vgpu_active(i915)) {
plane_state->no_fbc_reason = "VGPU active";
return 0;
@@ -1254,7 +1261,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_plane_state *plane_state;
+ const struct intel_plane_state __maybe_unused *plane_state;
bool need_vblank_wait = false;
struct intel_plane *plane;
int i;
@@ -1309,7 +1316,7 @@ static void __intel_fbc_post_update(struct intel_fbc *fbc)
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- const struct intel_plane_state *plane_state;
+ const struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
int i;
@@ -1408,7 +1415,7 @@ void intel_fbc_flush(struct drm_i915_private *i915,
int intel_fbc_atomic_check(struct intel_atomic_state *state)
{
- struct intel_plane_state *plane_state;
+ struct intel_plane_state __maybe_unused *plane_state;
struct intel_plane *plane;
int i;
@@ -1600,7 +1607,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
if (READ_ONCE(fbc->underrun_detected))
return;
- schedule_work(&fbc->underrun_work);
+ queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
}
/**
@@ -1707,9 +1714,6 @@ void intel_fbc_init(struct drm_i915_private *i915)
{
enum intel_fbc_id fbc_id;
- if (!drm_mm_initialized(&i915->mm.stolen))
- DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
-
if (need_fbc_vtd_wa(i915))
DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 3b5690acd720..31d0d695d567 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -85,9 +85,9 @@ static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}
-FB_GEN_DEFAULT_DEFERRED_IO_OPS(intel_fbdev,
- drm_fb_helper_damage_range,
- drm_fb_helper_damage_area)
+FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(intel_fbdev,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area)
static int intel_fbdev_set_par(struct fb_info *info)
{
@@ -135,9 +135,6 @@ static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
return i915_gem_fb_mmap(obj, vma);
}
-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding the default ops");
-
static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
@@ -149,8 +146,6 @@ static const struct fb_ops intelfb_ops = {
.fb_mmap = intel_fbdev_mmap,
};
-__diag_pop();
-
static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -187,8 +182,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features.
+ *
+ * Also skip stolen on MTL as a Wa_22018444074 mitigation.
*/
- if (size * 2 < dev_priv->dsm.usable_size)
+ if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
@@ -696,7 +693,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
- schedule_work(&dev_priv->display.fbdev.suspend_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.fbdev.suspend_work);
return;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 17a7aa8b28c2..22392f94b626 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -167,7 +167,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+ struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
if (origin == ORIGIN_CS) {
spin_lock(&i915->display.fb_tracking.lock);
@@ -188,7 +188,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+ struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
if (origin == ORIGIN_CS) {
spin_lock(&i915->display.fb_tracking.lock);
@@ -221,24 +221,18 @@ static void frontbuffer_retire(struct i915_active *ref)
}
static void frontbuffer_release(struct kref *ref)
- __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock)
+ __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
struct drm_i915_gem_object *obj = front->obj;
- struct i915_vma *vma;
- drm_WARN_ON(obj->base.dev, atomic_read(&front->bits));
+ drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits));
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- i915_vma_clear_scanout(vma);
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- }
- spin_unlock(&obj->vma.lock);
+ i915_ggtt_clear_scanout(obj);
- RCU_INIT_POINTER(obj->frontbuffer, NULL);
- spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock);
+ i915_gem_object_set_frontbuffer(obj, NULL);
+ spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock);
i915_active_fini(&front->write);
@@ -249,10 +243,10 @@ static void frontbuffer_release(struct kref *ref)
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_frontbuffer *front;
+ struct drm_i915_private *i915 = intel_bo_to_i915(obj);
+ struct intel_frontbuffer *front, *cur;
- front = __intel_frontbuffer_get(obj);
+ front = i915_gem_object_get_frontbuffer(obj);
if (front)
return front;
@@ -269,24 +263,18 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
I915_ACTIVE_RETIRE_SLEEPS);
spin_lock(&i915->display.fb_tracking.lock);
- if (rcu_access_pointer(obj->frontbuffer)) {
- kfree(front);
- front = rcu_dereference_protected(obj->frontbuffer, true);
- kref_get(&front->ref);
- } else {
- i915_gem_object_get(obj);
- rcu_assign_pointer(obj->frontbuffer, front);
- }
+ cur = i915_gem_object_set_frontbuffer(obj, front);
spin_unlock(&i915->display.fb_tracking.lock);
-
- return front;
+ if (cur != front)
+ kfree(front);
+ return cur;
}
void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
kref_put_lock(&front->ref,
frontbuffer_release,
- &to_i915(front->obj->base.dev)->display.fb_tracking.lock);
+ &intel_bo_to_i915(front->obj)->display.fb_tracking.lock);
}
/**
@@ -315,13 +303,13 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
- drm_WARN_ON(old->obj->base.dev,
+ drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm,
!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
- drm_WARN_ON(new->obj->base.dev,
+ drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm,
atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 3c474ed937fb..72d89be3284b 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -28,7 +28,6 @@
#include <linux/bits.h>
#include <linux/kref.h>
-#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"
struct drm_i915_private;
@@ -75,33 +74,6 @@ void intel_frontbuffer_flip(struct drm_i915_private *i915,
void intel_frontbuffer_put(struct intel_frontbuffer *front);
-static inline struct intel_frontbuffer *
-__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
-{
- struct intel_frontbuffer *front;
-
- if (likely(!rcu_access_pointer(obj->frontbuffer)))
- return NULL;
-
- rcu_read_lock();
- do {
- front = rcu_dereference(obj->frontbuffer);
- if (!front)
- break;
-
- if (unlikely(!kref_get_unless_zero(&front->ref)))
- continue;
-
- if (likely(front == rcu_access_pointer(obj->frontbuffer)))
- break;
-
- intel_frontbuffer_put(front);
- } while (1);
- rcu_read_unlock();
-
- return front;
-}
-
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 02b593b1e2ea..e8e8be54143b 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -255,3 +255,15 @@ int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
return 0;
}
+
+bool
+intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc)
+ if (!intel_atomic_get_new_crtc_state(state, crtc))
+ return false;
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index f01ee0bb3e5a..5477de8f0b30 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -87,4 +87,6 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state);
int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+bool intel_atomic_global_state_is_serialized(struct intel_atomic_state *state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 17542c28dfd5..a42549fa9691 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -177,8 +177,11 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
- if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
+ if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
+ drm_dbg_kms(&i915->drm,
+ "GSC components required for HDCP2.2 are not ready\n");
return false;
+ }
}
/* MEI/GSC interface is solid depending on which is used */
@@ -983,6 +986,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
struct drm_device *dev = connector->base.dev;
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
@@ -1001,7 +1005,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- schedule_work(&hdcp->prop_work);
+ queue_work(i915->unordered_wq, &hdcp->prop_work);
}
}
@@ -2090,16 +2094,17 @@ static void intel_hdcp_check_work(struct work_struct *work)
struct intel_hdcp,
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (drm_connector_is_unregistered(&connector->base))
return;
if (!intel_hdcp2_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP2_CHECK_PERIOD_MS);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
}
static int i915_hdcp_component_bind(struct device *i915_kdev,
@@ -2356,7 +2361,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&i915->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
- hdcp->content_type = (u8)conn_state->content_type;
+ hdcp->content_type = (u8)conn_state->hdcp_content_type;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
@@ -2398,7 +2403,8 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
}
if (!ret) {
- schedule_delayed_work(&hdcp->check_work, check_link_interval);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+ check_link_interval);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED,
true);
@@ -2447,6 +2453,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool content_protection_type_changed, desired_and_not_enabled = false;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (!connector->hdcp.shim)
return;
@@ -2473,7 +2480,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- schedule_work(&hdcp->prop_work);
+ queue_work(i915->unordered_wq, &hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}
@@ -2490,7 +2497,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- schedule_work(&hdcp->prop_work);
+ queue_work(i915->unordered_wq, &hdcp->prop_work);
}
}
@@ -2602,6 +2609,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (!hdcp->shim)
return;
@@ -2609,5 +2617,5 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
atomic_inc(&connector->hdcp.cp_irq_count);
wake_up_all(&connector->hdcp.cp_irq_queue);
- schedule_delayed_work(&hdcp->check_work, 0);
+ queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 72573ce1d0e9..d753db3eef15 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -6,6 +6,7 @@
#include <drm/i915_hdcp_interface.h>
#include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "i915_utils.h"
@@ -621,24 +622,26 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
struct intel_gt *gt = i915->media_gt;
struct drm_i915_gem_object *obj = NULL;
struct i915_vma *vma = NULL;
- void *cmd;
+ void *cmd_in, *cmd_out;
int err;
- /* allocate object of one page for HDCP command memory and store it */
- obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+ /* allocate an object of two pages for HDCP command memory and store it */
+ obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
if (IS_ERR(obj)) {
drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n");
return PTR_ERR(obj);
}
- cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
- if (IS_ERR(cmd)) {
+ cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
+ if (IS_ERR(cmd_in)) {
drm_err(&i915->drm, "Failed to map gsc message page!\n");
- err = PTR_ERR(cmd);
+ err = PTR_ERR(cmd_in);
goto out_unpin;
}
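+ /* first page holds the request, second page receives the GSC reply */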
+ cmd_out = cmd_in + PAGE_SIZE;
+
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -649,9 +652,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
if (err)
goto out_unmap;
- memset(cmd, 0, obj->base.size);
+ memset(cmd_in, 0, obj->base.size);
- hdcp_message->hdcp_cmd = cmd;
+ hdcp_message->hdcp_cmd_in = cmd_in;
+ hdcp_message->hdcp_cmd_out = cmd_out;
hdcp_message->vma = vma;
return 0;
@@ -691,6 +695,8 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
struct intel_hdcp_gsc_message *hdcp_message =
i915->display.hdcp.hdcp_message;
+ hdcp_message->hdcp_cmd_in = NULL;
+ hdcp_message->hdcp_cmd_out = NULL;
i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
kfree(hdcp_message);
}
@@ -721,38 +727,42 @@ void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
- struct intel_gsc_mtl_header *header, u64 addr,
+ struct intel_gsc_mtl_header *header_in,
+ struct intel_gsc_mtl_header *header_out,
+ u64 addr_in, u64 addr_out,
size_t msg_out_len)
{
struct intel_gt *gt = i915->media_gt;
int ret;
- header->flags = 0;
- ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr,
- header->message_size,
- addr,
- msg_out_len + sizeof(*header));
+ ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr_in,
+ header_in->message_size,
+ addr_out,
+ msg_out_len + sizeof(*header_out));
if (ret) {
drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
}
/*
- * Checking validity marker for memory sanity
+ * Checking validity marker and header status to see if some error has
+ * blocked us from sending message to gsc cs
*/
- if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+ if (header_out->validity_marker != GSC_HECI_VALIDITY_MARKER) {
drm_err(&i915->drm, "invalid validity marker\n");
return -EINVAL;
}
- if (header->status != 0) {
+ if (header_out->status != 0) {
drm_err(&i915->drm, "header status indicates error %d\n",
- header->status);
+ header_out->status);
return -EINVAL;
}
- if (header->flags & GSC_OUTFLAG_MSG_PENDING)
+ if (header_out->flags & GSC_OUTFLAG_MSG_PENDING) {
+ header_in->gsc_message_handle = header_out->gsc_message_handle;
return -EAGAIN;
+ }
return 0;
}
@@ -769,11 +779,11 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
size_t msg_out_len)
{
struct intel_gt *gt = i915->media_gt;
- struct intel_gsc_mtl_header *header;
- const size_t max_msg_size = PAGE_SIZE - sizeof(*header);
+ struct intel_gsc_mtl_header *header_in, *header_out;
+ const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
struct intel_hdcp_gsc_message *hdcp_message;
- u64 addr, host_session_id;
- u32 reply_size, msg_size;
+ u64 addr_in, addr_out, host_session_id;
+ u32 reply_size, msg_size_in, msg_size_out;
int ret, tries = 0;
if (!intel_uc_uses_gsc_uc(&gt->uc))
@@ -782,16 +792,20 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
return -ENOSPC;
+ msg_size_in = msg_in_len + sizeof(*header_in);
+ msg_size_out = msg_out_len + sizeof(*header_out);
hdcp_message = i915->display.hdcp.hdcp_message;
- header = hdcp_message->hdcp_cmd;
- addr = i915_ggtt_offset(hdcp_message->vma);
+ header_in = hdcp_message->hdcp_cmd_in;
+ header_out = hdcp_message->hdcp_cmd_out;
+ addr_in = i915_ggtt_offset(hdcp_message->vma);
+ addr_out = addr_in + PAGE_SIZE;
- msg_size = msg_in_len + sizeof(*header);
- memset(header, 0, msg_size);
+ memset(header_in, 0, msg_size_in);
+ memset(header_out, 0, msg_size_out);
get_random_bytes(&host_session_id, sizeof(u64));
- intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP,
- msg_size, host_session_id);
- memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len);
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
+ msg_size_in, host_session_id);
+ memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
/*
* Keep sending request in case the pending bit is set no need to add
@@ -800,7 +814,8 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
* 20 times each message 50 ms apart
*/
do {
- ret = intel_gsc_send_sync(i915, header, addr, msg_out_len);
+ ret = intel_gsc_send_sync(i915, header_in, header_out, addr_in,
+ addr_out, msg_out_len);
/* Only try again if gsc says so */
if (ret != -EAGAIN)
@@ -814,7 +829,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
goto err;
/* we use the same mem for the reply, so header is in the same loc */
- reply_size = header->message_size - sizeof(*header);
+ reply_size = header_out->message_size - sizeof(*header_out);
if (reply_size > msg_out_len) {
drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n",
reply_size, (u32)msg_out_len);
@@ -824,7 +839,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
reply_size, (u32)msg_out_len);
}
- memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len);
+ memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
err:
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index 5cc9fd2e88f6..cbf96551e534 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -13,7 +13,8 @@ struct drm_i915_private;
struct intel_hdcp_gsc_message {
struct i915_vma *vma;
- void *hdcp_cmd;
+ void *hdcp_cmd_in;
+ void *hdcp_cmd_out;
};
bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 7ac5e6c5e00d..94a7e1537f42 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2880,21 +2880,12 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
+static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u8 ddc_pin;
- ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
- if (ddc_pin) {
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n",
- encoder->base.base.id, encoder->base.name,
- ddc_pin);
- return ddc_pin;
- }
-
if (IS_ALDERLAKE_S(dev_priv))
ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
@@ -2903,7 +2894,8 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
- else if (IS_JSL_EHL(dev_priv) && HAS_PCH_TGP(dev_priv))
+ else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
+ HAS_PCH_TGP(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
@@ -2916,10 +2908,62 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n",
+ return ddc_pin;
+}
+
+static struct intel_encoder *
+get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_encoder *other;
+
+ for_each_intel_encoder(&i915->drm, other) {
+ if (other == encoder)
+ continue;
+
+ if (!intel_encoder_is_dig_port(other))
+ continue;
+
+ if (enc_to_dig_port(other)->hdmi.ddc_bus == ddc_pin)
+ return other;
+ }
+
+ return NULL;
+}
+
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_encoder *other;
+ const char *source;
+ u8 ddc_pin;
+
+ ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata);
+ source = "VBT";
+
+ if (!ddc_pin) {
+ ddc_pin = intel_hdmi_default_ddc_pin(encoder);
+ source = "platform default";
+ }
+
+ if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n",
+ encoder->base.base.id, encoder->base.name, ddc_pin);
+ return 0;
+ }
+
+ other = get_encoder_by_ddc_pin(encoder, ddc_pin);
+ if (other) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name, ddc_pin,
+ other->base.base.id, other->base.name);
+ return 0;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n",
encoder->base.base.id, encoder->base.name,
- ddc_pin);
+ ddc_pin, source);
return ddc_pin;
}
@@ -2990,6 +3034,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
return;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
+ if (!intel_hdmi->ddc_bus)
+ return;
+
ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
drm_connector_init_with_ddc(dev, connector,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 23a5e1a875f1..0ff5ed46ae1e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -212,7 +212,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(&dev_priv->drm);
- mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
+ mod_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -339,7 +340,8 @@ static void i915_digport_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->display.hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -374,6 +376,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 changed = 0, retry = 0;
u32 hpd_event_bits;
u32 hpd_retry_bits;
+ struct drm_connector *first_changed_connector = NULL;
+ int changed_connectors = 0;
mutex_lock(&dev_priv->drm.mode_config.mutex);
drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
@@ -426,6 +430,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
break;
case INTEL_HOTPLUG_CHANGED:
changed |= hpd_bit;
+ changed_connectors++;
+ if (!first_changed_connector) {
+ drm_connector_get(&connector->base);
+ first_changed_connector = &connector->base;
+ }
break;
case INTEL_HOTPLUG_RETRY:
retry |= hpd_bit;
@@ -436,9 +445,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev_priv->drm.mode_config.mutex);
- if (changed)
+ if (changed_connectors == 1)
+ drm_kms_helper_connector_hotplug_event(first_changed_connector);
+ else if (changed_connectors > 0)
drm_kms_helper_hotplug_event(&dev_priv->drm);
+ if (first_changed_connector)
+ drm_connector_put(first_changed_connector);
+
/* Remove shared HPD pins that have changed */
retry &= ~changed;
if (retry) {
@@ -446,7 +460,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
dev_priv->display.hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
}
}
@@ -577,7 +592,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.hotplug_work, 0);
}
/**
@@ -687,7 +703,8 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- schedule_work(&dev_priv->display.hotplug.poll_init_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.poll_init_work);
}
/**
@@ -715,7 +732,8 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
- schedule_work(&dev_priv->display.hotplug.poll_init_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.hotplug.poll_init_work);
}
void intel_hpd_init_early(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index f95fa793fabb..95a7ea94f417 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -842,6 +842,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ else
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -1049,7 +1051,7 @@ static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
- intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
mtp_hpd_invert(i915);
ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 5ff99ca7f1de..b8f43efb0ab5 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -26,6 +26,7 @@
#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
+#include "intel_pmdemand.h"
#include "intel_tc.h"
#include "intel_vblank.h"
#include "intel_wm.h"
@@ -115,6 +116,8 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_pmdemand_state *pmdemand_state =
+ to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -123,6 +126,10 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
if (connector->base.encoder != &encoder->base)
continue;
+ /* Clear the corresponding bit in pmdemand active phys mask */
+ intel_pmdemand_update_phys_mask(i915, encoder,
+ pmdemand_state, false);
+
set_encoder_for_connector(connector, NULL);
connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -151,6 +158,8 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_pmdemand_state *pmdemand_state =
+ to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -174,6 +183,8 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
+
+ intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, 0);
}
/*
@@ -552,6 +563,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
+ struct intel_pmdemand_state *pmdemand_state =
+ to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
/*
* We need to check both for a crtc link (meaning that the encoder is
@@ -575,6 +588,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.base.id,
encoder->base.name);
+ /* Clear the corresponding bit in pmdemand active phys mask */
+ intel_pmdemand_update_phys_mask(i915, encoder,
+ pmdemand_state, false);
+
/*
* Connector is active, but has no active pipe. This is fallout
* from our resume register restoring. Disable the encoder
@@ -661,6 +678,8 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_pmdemand_state *pmdemand_state =
+ to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -724,7 +743,15 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
intel_encoder_get_config(encoder, slave_crtc_state);
}
}
+
+ intel_pmdemand_update_phys_mask(i915, encoder,
+ pmdemand_state,
+ true);
} else {
+ intel_pmdemand_update_phys_mask(i915, encoder,
+ pmdemand_state,
+ false);
+
encoder->base.crtc = NULL;
}
@@ -841,8 +868,13 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
+ intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe,
+ crtc_state->port_clock);
+
intel_bw_crtc_update(bw_state, crtc_state);
}
+
+ intel_pmdemand_init_pmdemand_params(i915, pmdemand_state);
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index b7973a05d022..84078fb82b2f 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -635,7 +635,8 @@ static void asle_work(struct work_struct *work)
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
if (dev_priv->display.opregion.asle)
- schedule_work(&dev_priv->display.opregion.asle_work);
+ queue_work(dev_priv->unordered_wq,
+ &dev_priv->display.opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index d6fe2bbabe55..09c1aa1427ad 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1348,11 +1348,12 @@ out_unlock:
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
struct drm_i915_private *i915 = overlay->i915;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
+ if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */
+ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index f4c09cc37a5e..9583e86b602a 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -423,7 +423,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+ if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
new file mode 100644
index 000000000000..f7608d363634
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/bitops.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_atomic.h"
+#include "intel_bw.h"
+#include "intel_cdclk.h"
+#include "intel_de.h"
+#include "intel_display_trace.h"
+#include "intel_pmdemand.h"
+#include "skl_watermark.h"
+
+static struct intel_global_state *
+intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_pmdemand_state *pmdemand_state;
+
+ pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
+ if (!pmdemand_state)
+ return NULL;
+
+ return &pmdemand_state->base;
+}
+
+static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_pmdemand_funcs = {
+ .atomic_duplicate_state = intel_pmdemand_duplicate_state,
+ .atomic_destroy_state = intel_pmdemand_destroy_state,
+};
+
+static struct intel_pmdemand_state *
+intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *pmdemand_state =
+ intel_atomic_get_global_obj_state(state,
+ &i915->display.pmdemand.obj);
+
+ if (IS_ERR(pmdemand_state))
+ return ERR_CAST(pmdemand_state);
+
+ return to_intel_pmdemand_state(pmdemand_state);
+}
+
+static struct intel_pmdemand_state *
+intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *pmdemand_state =
+ intel_atomic_get_old_global_obj_state(state,
+ &i915->display.pmdemand.obj);
+
+ if (!pmdemand_state)
+ return NULL;
+
+ return to_intel_pmdemand_state(pmdemand_state);
+}
+
+static struct intel_pmdemand_state *
+intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *pmdemand_state =
+ intel_atomic_get_new_global_obj_state(state,
+ &i915->display.pmdemand.obj);
+
+ if (!pmdemand_state)
+ return NULL;
+
+ return to_intel_pmdemand_state(pmdemand_state);
+}
+
+int intel_pmdemand_init(struct drm_i915_private *i915)
+{
+ struct intel_pmdemand_state *pmdemand_state;
+
+ pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
+ if (!pmdemand_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
+ &pmdemand_state->base,
+ &intel_pmdemand_funcs);
+
+ if (IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0))
+ /* Wa_14016740474 */
+ intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
+
+ return 0;
+}
+
+void intel_pmdemand_init_early(struct drm_i915_private *i915)
+{
+ mutex_init(&i915->display.pmdemand.lock);
+ init_waitqueue_head(&i915->display.pmdemand.waitqueue);
+}
+
+void
+intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+ struct intel_encoder *encoder,
+ struct intel_pmdemand_state *pmdemand_state,
+ bool set_bit)
+{
+ enum phy phy;
+
+ if (DISPLAY_VER(i915) < 14)
+ return;
+
+ if (!encoder)
+ return;
+
+ phy = intel_port_to_phy(i915, encoder->port);
+ if (intel_phy_is_tc(i915, phy))
+ return;
+
+ if (set_bit)
+ pmdemand_state->active_combo_phys_mask |= BIT(phy);
+ else
+ pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
+}
+
+void
+intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+ struct intel_pmdemand_state *pmdemand_state,
+ enum pipe pipe, int port_clock)
+{
+ if (DISPLAY_VER(i915) < 14)
+ return;
+
+ pmdemand_state->ddi_clocks[pipe] = port_clock;
+}
+
+static void
+intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ struct intel_pmdemand_state *pmdemand_state)
+{
+ int max_ddiclk = 0;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_pmdemand_update_port_clock(i915, pmdemand_state,
+ crtc->pipe,
+ new_crtc_state->port_clock);
+
+ for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
+ max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);
+
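+ /* ddi_clocks[] is in kHz, the pmdemand field expects MHz */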
+ pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
+}
+
+static void
+intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ struct drm_connector_state *conn_state,
+ bool set_bit,
+ struct intel_pmdemand_state *pmdemand_state)
+{
+ struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+ struct intel_crtc_state *crtc_state;
+
+ if (!crtc)
+ return;
+
+ if (set_bit)
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ else
+ crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!crtc_state->hw.active)
+ return;
+
+ intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
+ set_bit);
+}
+
+static void
+intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ struct intel_pmdemand_state *pmdemand_state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, connector,
+ old_conn_state, new_conn_state, i) {
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ /* First clear the active phys in the old connector state */
+ intel_pmdemand_update_connector_phys(i915, state,
+ old_conn_state, false,
+ pmdemand_state);
+
+ /* Then set the active phys in new connector state */
+ intel_pmdemand_update_connector_phys(i915, state,
+ new_conn_state, true,
+ pmdemand_state);
+ }
+
+ pmdemand_state->params.active_phys =
+ min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
+ 7);
+}
+
+static bool
+intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
+ struct intel_encoder *encoder)
+{
+ enum phy phy;
+
+ if (!encoder)
+ return false;
+
+ phy = intel_port_to_phy(i915, encoder->port);
+
+ return intel_phy_is_tc(i915, phy);
+}
+
+static bool
+intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, connector,
+ old_conn_state, new_conn_state, i) {
+ struct intel_encoder *old_encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+ struct intel_encoder *new_encoder =
+ to_intel_encoder(new_conn_state->best_encoder);
+
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ if (old_encoder == new_encoder ||
+ (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
+ intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
+ continue;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
+{
+ const struct intel_bw_state *new_bw_state, *old_bw_state;
+ const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ if (new_bw_state && new_bw_state->qgv_point_peakbw !=
+ old_bw_state->qgv_point_peakbw)
+ return true;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (new_dbuf_state &&
+ (new_dbuf_state->active_pipes !=
+ old_dbuf_state->active_pipes ||
+ new_dbuf_state->enabled_slices !=
+ old_dbuf_state->enabled_slices))
+ return true;
+
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ if (new_cdclk_state &&
+ (new_cdclk_state->actual.cdclk !=
+ old_cdclk_state->actual.cdclk ||
+ new_cdclk_state->actual.voltage_level !=
+ old_cdclk_state->actual.voltage_level))
+ return true;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ if (new_crtc_state->port_clock != old_crtc_state->port_clock)
+ return true;
+
+ return intel_pmdemand_connector_needs_update(state);
+}
+
+int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state;
+ const struct intel_cdclk_state *new_cdclk_state;
+ const struct intel_dbuf_state *new_dbuf_state;
+ struct intel_pmdemand_state *new_pmdemand_state;
+
+ if (DISPLAY_VER(i915) < 14)
+ return 0;
+
+ if (!intel_pmdemand_needs_update(state))
+ return 0;
+
+ new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
+ if (IS_ERR(new_pmdemand_state))
+ return PTR_ERR(new_pmdemand_state);
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ /* the firmware calculates the qclk_gv_index, so the requirement is set to 0 */
+ new_pmdemand_state->params.qclk_gv_index = 0;
+ new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;
+
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ new_pmdemand_state->params.active_pipes =
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
+ new_pmdemand_state->params.active_dbufs =
+ min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ new_pmdemand_state->params.voltage_index =
+ new_cdclk_state->actual.voltage_level;
+ new_pmdemand_state->params.cdclk_freq_mhz =
+ DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
+
+ intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);
+
+ intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);
+
+ /*
+ * Active_PLLs starts with 1 because of the CDCLK PLL.
+ * TODO: Account for the genlock filter once it is in use.
+ */
+ new_pmdemand_state->params.plls =
+ min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);
+
+ /*
+ * Set scalers to the max since the value cannot be calculated during
+ * flips and fastsets without taking the global state locks.
+ */
+ new_pmdemand_state->params.scalers = 7;
+
+ if (state->base.allow_modeset)
+ return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
+ else
+ return intel_atomic_lock_global_state(&new_pmdemand_state->base);
+}
+
+static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
+{
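+ /*
+ * A new request may only be issued once the previous one has completed,
+ * i.e. once both the request enable bit and the in-flight status bit
+ * have cleared.
+ */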
+ return !(intel_de_wait_for_clear(i915,
+ XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
+ intel_de_wait_for_clear(i915,
+ GEN12_DCPR_STATUS_1,
+ XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
+}
+
+void
+intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+ struct intel_pmdemand_state *pmdemand_state)
+{
+ u32 reg1, reg2;
+
+ if (DISPLAY_VER(i915) < 14)
+ return;
+
+ mutex_lock(&i915->display.pmdemand.lock);
+ if (drm_WARN_ON(&i915->drm,
+ !intel_pmdemand_check_prev_transaction(i915))) {
+ memset(&pmdemand_state->params, 0,
+ sizeof(pmdemand_state->params));
+ goto unlock;
+ }
+
+ reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+
+ reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+
+ /* Set 1 */
+ pmdemand_state->params.qclk_gv_bw =
+ REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
+ pmdemand_state->params.voltage_index =
+ REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
+ pmdemand_state->params.qclk_gv_index =
+ REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
+ pmdemand_state->params.active_pipes =
+ REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
+ pmdemand_state->params.active_dbufs =
+ REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
+ pmdemand_state->params.active_phys =
+ REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);
+
+ /* Set 2 */
+ pmdemand_state->params.cdclk_freq_mhz =
+ REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
+ pmdemand_state->params.ddiclk_max =
+ REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
+ pmdemand_state->params.scalers =
+ REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+
+unlock:
+ mutex_unlock(&i915->display.pmdemand.lock);
+}
+
+static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
+{
+ return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
+ XELPDP_PMDEMAND_REQ_ENABLE);
+}
+
+static void intel_pmdemand_wait(struct drm_i915_private *i915)
+{
+ if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
+ intel_pmdemand_req_complete(i915),
+ msecs_to_jiffies_timeout(10)))
+ drm_err(&i915->drm,
+ "timed out waiting for Punit PM Demand Response\n");
+}
+
+/* Required to be programmed during Display Init Sequences. */
+void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+ u8 dbuf_slices)
+{
+ u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
+
+ mutex_lock(&i915->display.pmdemand.lock);
+ if (drm_WARN_ON(&i915->drm,
+ !intel_pmdemand_check_prev_transaction(i915)))
+ goto unlock;
+
+ intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ XELPDP_PMDEMAND_DBUFS_MASK,
+ REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
+ intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ XELPDP_PMDEMAND_REQ_ENABLE);
+
+ intel_pmdemand_wait(i915);
+
+unlock:
+ mutex_unlock(&i915->display.pmdemand.lock);
+}
+
+static void
+intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
+ const struct intel_pmdemand_state *old,
+ u32 *reg1, u32 *reg2, bool serialized)
+{
+ /*
+ * The pmdemand parameter update happens in two steps: pre-plane and
+ * post-plane. During the pre-plane step the DE might still be handling
+ * some old operations, so to avoid unexpected performance issues,
+ * program the pmdemand parameters with the higher of the old and new
+ * values. Once things have settled, use the new parameter values as
+ * part of the post-plane update.
+ *
+ * If the pmdemand parameter update happens without a modeset allowed,
+ * the updates cannot be serialized, which implies that parallel atomic
+ * commits may affect the pmdemand parameters. In that case the current
+ * register values need to be considered as well: in the pre-plane case
+ * use the max of the old, new and current register values if not
+ * serialized, and in the post-plane update use the max of the new and
+ * current register values if not serialized.
+ */
+
+#define update_reg(reg, field, mask) do { \
+ u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
+ u32 old_val = old ? old->params.field : 0; \
+ u32 new_val = new->params.field; \
+\
+ *(reg) &= ~(mask); \
+ *(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
+} while (0)
+
+ /* Set 1 */
+ update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
+ update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
+ update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
+ update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
+ update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
+ update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);
+
+ /* Set 2 */
+ update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
+ update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
+ update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
+ update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);
+
+#undef update_reg
+}
+
+static void
+intel_pmdemand_program_params(struct drm_i915_private *i915,
+ const struct intel_pmdemand_state *new,
+ const struct intel_pmdemand_state *old,
+ bool serialized)
+{
+ bool changed = false;
+ u32 reg1, mod_reg1;
+ u32 reg2, mod_reg2;
+
+ mutex_lock(&i915->display.pmdemand.lock);
+ if (drm_WARN_ON(&i915->drm,
+ !intel_pmdemand_check_prev_transaction(i915)))
+ goto unlock;
+
+ reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ mod_reg1 = reg1;
+
+ reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ mod_reg2 = reg2;
+
+ intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
+ serialized);
+
+ if (reg1 != mod_reg1) {
+ intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ mod_reg1);
+ changed = true;
+ }
+
+ if (reg2 != mod_reg2) {
+ intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ mod_reg2);
+ changed = true;
+ }
+
+ /* Initiate pm demand request only if register values are changed */
+ if (!changed)
+ goto unlock;
+
+ drm_dbg_kms(&i915->drm,
+ "initate pmdemand request values: (0x%x 0x%x)\n",
+ mod_reg1, mod_reg2);
+
+ intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ XELPDP_PMDEMAND_REQ_ENABLE);
+
+ intel_pmdemand_wait(i915);
+
+unlock:
+ mutex_unlock(&i915->display.pmdemand.lock);
+}
+
+static bool
+intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
+ const struct intel_pmdemand_state *old)
+{
+ return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
+}
+
+void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_pmdemand_state *new_pmdemand_state =
+ intel_atomic_get_new_pmdemand_state(state);
+ const struct intel_pmdemand_state *old_pmdemand_state =
+ intel_atomic_get_old_pmdemand_state(state);
+
+ if (DISPLAY_VER(i915) < 14)
+ return;
+
+ if (!new_pmdemand_state ||
+ !intel_pmdemand_state_changed(new_pmdemand_state,
+ old_pmdemand_state))
+ return;
+
+ WARN_ON(!new_pmdemand_state->base.changed);
+
+ intel_pmdemand_program_params(i915, new_pmdemand_state,
+ old_pmdemand_state,
+ intel_atomic_global_state_is_serialized(state));
+}
+
+void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_pmdemand_state *new_pmdemand_state =
+ intel_atomic_get_new_pmdemand_state(state);
+ const struct intel_pmdemand_state *old_pmdemand_state =
+ intel_atomic_get_old_pmdemand_state(state);
+
+ if (DISPLAY_VER(i915) < 14)
+ return;
+
+ if (!new_pmdemand_state ||
+ !intel_pmdemand_state_changed(new_pmdemand_state,
+ old_pmdemand_state))
+ return;
+
+ WARN_ON(!new_pmdemand_state->base.changed);
+
+ intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
+ intel_atomic_global_state_is_serialized(state));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
new file mode 100644
index 000000000000..2941a1a18b72
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_PMDEMAND_H__
+#define __INTEL_PMDEMAND_H__
+
+#include "intel_display_limits.h"
+#include "intel_global_state.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_plane_state;
+
+struct pmdemand_params {
+ u16 qclk_gv_bw;
+ u8 voltage_index;
+ u8 qclk_gv_index;
+ u8 active_pipes;
+ u8 active_dbufs;
+ /* Total number of non-Type-C active phys, from active_phys_mask */
+ u8 active_phys;
+ u8 plls;
+ u16 cdclk_freq_mhz;
+ /* max from ddi_clocks[] */
+ u16 ddiclk_max;
+ u8 scalers;
+};
+
+struct intel_pmdemand_state {
+ struct intel_global_state base;
+
+ /* Maintain a persistent list of port clocks across all crtcs */
+ int ddi_clocks[I915_MAX_PIPES];
+
+ /* Maintain a persistent mask of the non-Type-C phys */
+ u16 active_combo_phys_mask;
+
+ /* Parameters to be configured in the pmdemand registers */
+ struct pmdemand_params params;
+};
+
+#define to_intel_pmdemand_state(x) container_of((x), \
+ struct intel_pmdemand_state, \
+ base)
+
+void intel_pmdemand_init_early(struct drm_i915_private *i915);
+int intel_pmdemand_init(struct drm_i915_private *i915);
+void intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+ struct intel_pmdemand_state *pmdemand_state);
+void intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+ struct intel_pmdemand_state *pmdemand_state,
+ enum pipe pipe, int port_clock);
+void intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+ struct intel_encoder *encoder,
+ struct intel_pmdemand_state *pmdemand_state,
+ bool clear_bit);
+void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+ u8 dbuf_slices);
+void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state);
+void intel_pmdemand_post_plane_update(struct intel_atomic_state *state);
+int intel_pmdemand_atomic_check(struct intel_atomic_state *state);
+
+#endif /* __INTEL_PMDEMAND_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 5e7ba594e7e7..73f0f1714b37 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -867,6 +867,7 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
unsigned long delay;
/*
@@ -882,7 +883,8 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
* operations.
*/
delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
- schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+ queue_delayed_work(i915->unordered_wq,
+ &intel_dp->pps.panel_vdd_work, delay);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index ea0389c5f656..97d5eef10130 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -234,23 +234,91 @@ static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
EDP_PSR_MASK(intel_dp->psr.transcoder);
}
-static void psr_irq_control(struct intel_dp *intel_dp)
+static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- i915_reg_t imr_reg;
- u32 mask;
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return EDP_PSR_CTL(cpu_transcoder);
+ else
+ return HSW_SRD_CTL;
+}
+
+static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return EDP_PSR_DEBUG(cpu_transcoder);
+ else
+ return HSW_SRD_DEBUG;
+}
+
+static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return EDP_PSR_PERF_CNT(cpu_transcoder);
+ else
+ return HSW_SRD_PERF_CNT;
+}
+
+static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return EDP_PSR_STATUS(cpu_transcoder);
+ else
+ return HSW_SRD_STATUS;
+}
+static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return TRANS_PSR_IMR(cpu_transcoder);
+ else
+ return EDP_PSR_IMR;
+}
+
+static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
if (DISPLAY_VER(dev_priv) >= 12)
- imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
+ return TRANS_PSR_IIR(cpu_transcoder);
+ else
+ return EDP_PSR_IIR;
+}
+
+static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return EDP_PSR_AUX_CTL(cpu_transcoder);
+ else
+ return HSW_SRD_AUX_CTL;
+}
+
+static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, int i)
+{
+ if (DISPLAY_VER(dev_priv) >= 8)
+ return EDP_PSR_AUX_DATA(cpu_transcoder, i);
else
- imr_reg = EDP_PSR_IMR;
+ return HSW_SRD_AUX_DATA(i);
+}
+
+static void psr_irq_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ u32 mask;
mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask);
+ intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ psr_irq_mask_get(intel_dp), ~mask);
}
static void psr_event_print(struct drm_i915_private *i915,
@@ -296,12 +364,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
- i915_reg_t imr_reg;
-
- if (DISPLAY_VER(dev_priv) >= 12)
- imr_reg = TRANS_PSR_IMR(cpu_transcoder);
- else
- imr_reg = EDP_PSR_IMR;
if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
@@ -339,9 +401,10 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again so we don't care about unmask the interruption
* or unset irq_aux_error.
*/
- intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
+ intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ 0, psr_irq_psr_error_bit_get(intel_dp));
- schedule_work(&intel_dp->psr.work);
+ queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
}
}
@@ -467,6 +530,43 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
}
}
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ u32 aux_clock_divider, aux_ctl;
+ /* write DP_SET_POWER=D0 */
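+ /*
+ * Native AUX write request layout: bytes 0-2 carry the command and the
+ * 20-bit DPCD address, byte 3 is the payload length minus one (a single
+ * data byte here), and byte 4 is the payload itself.
+ */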
+ static const u8 aux_msg[] = {
+ [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
+ [1] = (DP_SET_POWER >> 8) & 0xff,
+ [2] = DP_SET_POWER & 0xff,
+ [3] = 1 - 1,
+ [4] = DP_SET_POWER_D0,
+ };
+ int i;
+
+ BUILD_BUG_ON(sizeof(aux_msg) > 20);
+ for (i = 0; i < sizeof(aux_msg); i += 4)
+ intel_de_write(dev_priv,
+ psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
+ intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
+
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+ /* Start with bits set for DDI_AUX_CTL register */
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
+ aux_clock_divider);
+
+ /* Select only valid bits for SRD_AUX_CTL */
+ aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+ EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+ EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+ EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
+
+ intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
+ aux_ctl);
+}
+
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -528,6 +628,15 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
+ /*
+ * WA 0479: hsw,bdw
+ * "Do not skip both TP1 and TP2/TP3"
+ */
+ if (DISPLAY_VER(dev_priv) < 9 &&
+ connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
+ connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_100us;
+
check_tp3_sel:
if (intel_dp_source_supports_tps3(dev_priv) &&
drm_dp_tps3_supported(intel_dp->dpcd))
@@ -577,7 +686,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+ intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
@@ -639,7 +748,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -685,7 +794,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}
@@ -697,8 +806,10 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(dev_priv) >= 12)
return cpu_transcoder == TRANSCODER_A;
- else
+ else if (DISPLAY_VER(dev_priv) >= 9)
return cpu_transcoder == TRANSCODER_EDP;
+ else
+ return false;
}
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
@@ -807,7 +918,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
return;
/*
@@ -933,9 +1044,9 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
}
io_wake_lines = intel_usecs_to_scanlines(
- &crtc_state->uapi.adjusted_mode, io_wake_time);
+ &crtc_state->hw.adjusted_mode, io_wake_time);
fast_wake_lines = intel_usecs_to_scanlines(
- &crtc_state->uapi.adjusted_mode, fast_wake_time);
+ &crtc_state->hw.adjusted_mode, fast_wake_time);
if (io_wake_lines > max_wake_lines ||
fast_wake_lines > max_wake_lines)
@@ -963,7 +1074,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only supports eDP 1.3 */
- if (IS_JSL_EHL(dev_priv)) {
+ if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
return false;
}
@@ -975,7 +1086,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
return false;
}
@@ -1033,7 +1144,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
@@ -1201,13 +1312,15 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- if (transcoder_has_psr2(dev_priv, cpu_transcoder))
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ transcoder_has_psr2(dev_priv, cpu_transcoder) &&
+ intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE);
+ intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
+
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+
lockdep_assert_held(&intel_dp->psr.lock);
/* psr1 and psr2 are mutually exclusive.*/
@@ -1272,6 +1385,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
u32 mask;
/*
+ * Only HSW and BDW have PSR AUX registers that need to be set up.
+ * SKL+ uses hardcoded values for PSR AUX transactions.
+ */
+ if (DISPLAY_VER(dev_priv) < 9)
+ hsw_psr_setup_aux(intel_dp);
+
+ /*
* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
* mask LPSP to avoid dependency on other drivers that might block
* runtime_pm besides preventing other hw tracking issues now we
@@ -1282,11 +1402,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
EDP_PSR_DEBUG_MASK_LPSP |
EDP_PSR_DEBUG_MASK_MAX_SLEEP;
- if (DISPLAY_VER(dev_priv) < 11)
+ /*
+ * No separate pipe reg write mask on hsw/bdw, so have to unmask all
+ * registers in order to keep the CURSURFLIVE tricks working :(
+ */
+ if (IS_DISPLAY_VER(dev_priv, 9, 10))
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder),
- mask);
+ /* allow PSR with sprite enabled */
+ if (IS_HASWELL(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
+
+ intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
psr_irq_control(intel_dp);
@@ -1352,10 +1479,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate so lets keep PSR disabled
* to avoid any rendering problems.
*/
- if (DISPLAY_VER(dev_priv) >= 12)
- val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder));
- else
- val = intel_de_read(dev_priv, EDP_PSR_IIR);
+ val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
@@ -1418,7 +1542,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
- val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
@@ -1432,7 +1556,7 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
} else {
- val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+ val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
EDP_PSR_ENABLE, 0);
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
@@ -1451,7 +1575,7 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
psr_status = EDP_PSR2_STATUS(cpu_transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS(cpu_transcoder);
+ psr_status = psr_status_reg(dev_priv, cpu_transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -2151,7 +2275,7 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* defensive enough to cover everything.
*/
return intel_de_wait_for_clear(dev_priv,
- EDP_PSR_STATUS(cpu_transcoder),
+ psr_status_reg(dev_priv, cpu_transcoder),
EDP_PSR_STATUS_STATE_MASK, 50);
}
@@ -2205,7 +2329,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
reg = EDP_PSR2_STATUS(cpu_transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS(cpu_transcoder);
+ reg = psr_status_reg(dev_priv, cpu_transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -2440,6 +2564,8 @@ static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
!intel_dp->psr.active)
return;
@@ -2453,7 +2579,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
return;
tgl_psr2_enable_dc3co(intel_dp);
- mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+ mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
}
@@ -2493,7 +2619,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
psr_force_hw_tracking_exit(intel_dp);
if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
+ queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
}
}
@@ -2823,7 +2949,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder));
+ val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -2870,7 +2996,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2882,7 +3008,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
/*
* SKL+ Perf counter is reset to 0 everytime DC state is entered
*/
- val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder));
+ val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
seq_printf(m, "Performance counter: %u\n",
REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 0f7db617425a..d39951383c92 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -7,6 +7,7 @@
#define __INTEL_PSR_REGS_H__
#include "intel_display_reg_defs.h"
+#include "intel_dp_aux_regs.h"
#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
@@ -19,6 +20,7 @@
* HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one
* instance of it
*/
+#define HSW_SRD_CTL _MMIO(0x64800)
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
@@ -79,10 +81,22 @@
#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
_EDP_PSR_TRANS_SHIFT(trans))
+#define HSW_SRD_AUX_CTL _MMIO(0x64810)
+#define _SRD_AUX_CTL_A 0x60810
+#define _SRD_AUX_CTL_EDP 0x6f810
+#define EDP_PSR_AUX_CTL(tran) _MMIO_TRANS2(tran, _SRD_AUX_CTL_A)
+#define EDP_PSR_AUX_CTL_TIME_OUT_MASK DP_AUX_CH_CTL_TIME_OUT_MASK
+#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
+#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK DP_AUX_CH_CTL_PRECHARGE_2US_MASK
+#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT REG_BIT(11)
+#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK
+
+#define HSW_SRD_AUX_DATA(i) _MMIO(0x64814 + (i) * 4) /* 5 registers */
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) * 4) /* 5 registers */
+#define HSW_SRD_STATUS _MMIO(0x64840)
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
@@ -107,12 +121,14 @@
#define EDP_PSR_STATUS_SENDING_TP1 REG_BIT(4)
#define EDP_PSR_STATUS_IDLE_MASK REG_GENMASK(3, 0)
+#define HSW_SRD_PERF_CNT _MMIO(0x64844)
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
#define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0)
/* PSR_MASK on SKL+ */
+#define HSW_SRD_DEBUG _MMIO(0x64860)
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 6e86c0971d24..543cdc46aa1d 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -17,13 +17,17 @@
/* from BPP 6 to 36 in steps of 0.5 */
#define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61
-/* from BPP 6 to 24 in steps of 0.5 */
+/* For YCbCr420, the bits_per_pixel sent in the PPS
+ * parameters is double the target bpp. The values
+ * below represent the target bpp.
+ */
+/* from BPP 4 to 12 in steps of 0.5 */
#define RC_RANGE_QP420_8BPC_MAX_NUM_BPP 17
-/* from BPP 6 to 30 in steps of 0.5 */
+/* from BPP 4 to 15 in steps of 0.5 */
#define RC_RANGE_QP420_10BPC_MAX_NUM_BPP 23
-/* from BPP 6 to 36 in steps of 0.5 */
+/* from BPP 4 to 18 in steps of 0.5 */
#define RC_RANGE_QP420_12BPC_MAX_NUM_BPP 29
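+/*
+ * As a quick check of the counts above: with 0.5 bpp steps there are
+ * (max_bpp - min_bpp) * 2 + 1 entries, e.g. (12 - 4) * 2 + 1 = 17,
+ * (15 - 4) * 2 + 1 = 23 and (18 - 4) * 2 + 1 = 29.
+ */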
/*
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 21f92123c844..7d25a64698e2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2097,7 +2097,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
const struct edid *edid = drm_edid_raw(drm_edid);
/* DDC bus is shared, match EDID to connector type */
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
&conn_state->base.base);
- INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+ intel_panel_init_alloc(&sdvo_connector->base);
return sdvo_connector;
}
@@ -3313,13 +3313,19 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
return i2c_add_adapter(&sdvo->ddc) == 0;
}
-static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
- enum port port)
+static bool is_sdvo_port_valid(struct drm_i915_private *dev_priv, enum port port)
{
if (HAS_PCH_SPLIT(dev_priv))
- drm_WARN_ON(&dev_priv->drm, port != PORT_B);
+ return port == PORT_B;
else
- drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
+ return port == PORT_B || port == PORT_C;
+}
+
+static bool assert_sdvo_port_valid(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ return !drm_WARN(&dev_priv->drm, !is_sdvo_port_valid(dev_priv, port),
+ "Platform does not support SDVO %c\n", port_name(port));
}
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -3329,7 +3335,11 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
struct intel_sdvo *intel_sdvo;
int i;
- assert_sdvo_port_valid(dev_priv, port);
+ if (!assert_port_valid(dev_priv, port))
+ return false;
+
+ if (!assert_sdvo_port_valid(dev_priv, port))
+ return false;
intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index bd9116d2cd76..9d76c2756784 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -53,22 +53,32 @@ static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
}
static void
+intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
+ int bpp)
+{
+ int bpc = vdsc_cfg->bits_per_component;
+
+ /* Read range_minqp and range_max_qp from qp tables */
+ vdsc_cfg->rc_range_params[buf].range_min_qp =
+ intel_lookup_range_min_qp(bpc, buf, bpp, vdsc_cfg->native_420);
+ vdsc_cfg->rc_range_params[buf].range_max_qp =
+ intel_lookup_range_max_qp(bpc, buf, bpp, vdsc_cfg->native_420);
+}
+
+/*
+ * We are using the method provided in the DSC 1.2a C-Model in codec_main.c.
+ * The above method uses a common formula to derive values for any combination of
+ * DSC variables. The formula approach may yield slight differences in the derived
+ * PPS parameters from the original parameter sets. These differences are not
+ * consequential to the coding performance because all parameter sets have been
+ * shown to produce visually lossless quality (it provides the same PPS values as
+ * the DSCParameterValuesVESA V1-2 spreadsheet).
+ */
+static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
int bpc = vdsc_cfg->bits_per_component;
int bpp = vdsc_cfg->bits_per_pixel >> 4;
- static const s8 ofs_und6[] = {
- 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
- };
- static const s8 ofs_und8[] = {
- 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
- };
- static const s8 ofs_und12[] = {
- 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
- };
- static const s8 ofs_und15[] = {
- 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
- };
int qp_bpc_modifier = (bpc - 8) * 2;
u32 res, buf_i, bpp_i;
@@ -78,6 +88,28 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
else
vdsc_cfg->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1);
+ /*
+ * According to DSC 1.2 spec in Section 4.1 if native_420 is set:
+ * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice
+ * height < 8.
+ * -second_line_offset_adj is 512, as shown by empirical values, to yield the best
+ * chroma preservation in the second line.
+ * -nsl_bpg_offset is calculated as second_line_bpg_offset / (slice_height - 1) and
+ * then rounded up to 16 fractional bits; we left shift the second line offset by 11
+ * to preserve 11 fractional bits.
+ */
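+ /*
+ * Worked example (illustrative values): slice_height = 8 gives
+ * second_line_bpg_offset = 12 and
+ * nsl_bpg_offset = DIV_ROUND_UP(12 << 11, 8 - 1) = 3511.
+ */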
+ if (vdsc_cfg->native_420) {
+ if (vdsc_cfg->slice_height >= 8)
+ vdsc_cfg->second_line_bpg_offset = 12;
+ else
+ vdsc_cfg->second_line_bpg_offset =
+ 2 * (vdsc_cfg->slice_height - 1);
+
+ vdsc_cfg->second_line_offset_adj = 512;
+ vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11,
+ vdsc_cfg->slice_height - 1);
+ }
+
/* Our hw supports only 444 modes as of today */
if (bpp >= 12)
vdsc_cfg->initial_offset = 2048;
@@ -97,33 +129,88 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
vdsc_cfg->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
- bpp_i = (2 * (bpp - 6));
- for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
- u8 range_bpg_offset;
-
- /* Read range_minqp and range_max_qp from qp tables */
- vdsc_cfg->rc_range_params[buf_i].range_min_qp =
- intel_lookup_range_min_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420);
- vdsc_cfg->rc_range_params[buf_i].range_max_qp =
- intel_lookup_range_max_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420);
-
- /* Calculate range_bpg_offset */
- if (bpp <= 6) {
- range_bpg_offset = ofs_und6[buf_i];
- } else if (bpp <= 8) {
- res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
- range_bpg_offset = ofs_und6[buf_i] + res;
- } else if (bpp <= 12) {
- range_bpg_offset = ofs_und8[buf_i];
- } else if (bpp <= 15) {
- res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
- range_bpg_offset = ofs_und12[buf_i] + res;
- } else {
- range_bpg_offset = ofs_und15[buf_i];
+ if (vdsc_cfg->native_420) {
+ static const s8 ofs_und4[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+ };
+ static const s8 ofs_und5[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und6[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und8[] = {
+ 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
+ };
+
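+ /*
+ * bits_per_pixel is doubled for native 4:2:0, so bpp - 8 equals
+ * 2 * (target_bpp - 4), i.e. the QP420 table index in 0.5 bpp
+ * steps starting from 4 bpp.
+ */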
+ bpp_i = bpp - 8;
+ for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+ u8 range_bpg_offset;
+
+ intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
+
+ /* Calculate range_bpg_offset */
+ if (bpp <= 8) {
+ range_bpg_offset = ofs_und4[buf_i];
+ } else if (bpp <= 10) {
+ res = DIV_ROUND_UP(((bpp - 8) *
+ (ofs_und5[buf_i] - ofs_und4[buf_i])), 2);
+ range_bpg_offset = ofs_und4[buf_i] + res;
+ } else if (bpp <= 12) {
+ res = DIV_ROUND_UP(((bpp - 10) *
+ (ofs_und6[buf_i] - ofs_und5[buf_i])), 2);
+ range_bpg_offset = ofs_und5[buf_i] + res;
+ } else if (bpp <= 16) {
+ res = DIV_ROUND_UP(((bpp - 12) *
+ (ofs_und8[buf_i] - ofs_und6[buf_i])), 4);
+ range_bpg_offset = ofs_und6[buf_i] + res;
+ } else {
+ range_bpg_offset = ofs_und8[buf_i];
+ }
+
+ vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
+ range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
+ }
+ } else {
+ static const s8 ofs_und6[] = {
+ 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+ };
+ static const s8 ofs_und8[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und12[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und15[] = {
+ 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
+ };
+
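+ /* QP444 tables start at 6 bpp in 0.5 bpp steps, hence index 2 * (bpp - 6) */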
+ bpp_i = (2 * (bpp - 6));
+ for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+ u8 range_bpg_offset;
+
+ intel_vdsc_set_min_max_qp(vdsc_cfg, buf_i, bpp_i);
+
+ /* Calculate range_bpg_offset */
+ if (bpp <= 6) {
+ range_bpg_offset = ofs_und6[buf_i];
+ } else if (bpp <= 8) {
+ res = DIV_ROUND_UP(((bpp - 6) *
+ (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
+ range_bpg_offset = ofs_und6[buf_i] + res;
+ } else if (bpp <= 12) {
+ range_bpg_offset = ofs_und8[buf_i];
+ } else if (bpp <= 15) {
+ res = DIV_ROUND_UP(((bpp - 12) *
+ (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
+ range_bpg_offset = ofs_und12[buf_i] + res;
+ } else {
+ range_bpg_offset = ofs_und15[buf_i];
+ }
+
+ vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
+ range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
}
-
- vdsc_cfg->rc_range_params[buf_i].range_bpg_offset =
- range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
}
}
@@ -190,30 +277,12 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
/*
- * According to DSC 1.2 specs in Section 4.1 if native_420 is set:
- * -We need to double the current bpp.
- * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice
- * height < 8.
- * -second_line_offset_adj is 512 as shown by emperical values to yeild best chroma
- * preservation in second line.
- * -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded
- * up to 16 fractional bits, we left shift second line offset by 11 to preserve 11
- * fractional bits.
+ * According to DSC 1.2 specs in Section 4.1 if native_420 is set
+ * we need to double the current bpp.
*/
- if (vdsc_cfg->native_420) {
+ if (vdsc_cfg->native_420)
vdsc_cfg->bits_per_pixel <<= 1;
- if (vdsc_cfg->slice_height >= 8)
- vdsc_cfg->second_line_bpg_offset = 12;
- else
- vdsc_cfg->second_line_bpg_offset =
- 2 * (vdsc_cfg->slice_height - 1);
-
- vdsc_cfg->second_line_offset_adj = 512;
- vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11,
- vdsc_cfg->slice_height - 1);
- }
-
vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
drm_dsc_set_rc_buf_thresh(vdsc_cfg);
@@ -237,18 +306,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
if (ret)
return ret;
-
- /*
- * FIXME: verify that the hardware actually needs these
- * modifications rather than them being simple typos.
- */
- if (compressed_bpp == 6 &&
- vdsc_cfg->bits_per_component == 8)
- vdsc_cfg->rc_quant_incr_limit1 = 23;
-
- if (compressed_bpp == 8 &&
- vdsc_cfg->bits_per_component == 14)
- vdsc_cfg->rc_range_params[0].range_bpg_offset = 0;
}
/*
@@ -293,6 +350,16 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
}
+int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
+{
+ int num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
+
+ if (crtc_state->bigjoiner_pipes)
+ num_vdsc_instances *= 2;
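+ /* e.g. a dsc_split + bigjoiner configuration uses 2 * 2 = 4 VDSC instances */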
+
+ return num_vdsc_instances;
+}
+
static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -303,11 +370,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
- u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
int i = 0;
-
- if (crtc_state->bigjoiner_pipes)
- num_vdsc_instances *= 2;
+ int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
/* Populate PICTURE_PARAMETER_SET_0 registers */
pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 8763f00fa7e2..2cc41ff08909 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -22,6 +22,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
+int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state);
void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 36070d86550f..ffc15d278a39 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -2174,7 +2174,7 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
return false;
/* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
if (DISPLAY_VER(i915) >= 11)
@@ -2196,11 +2196,11 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
/* Wa_14010477008 */
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
- IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
+ (IS_TIGERLAKE(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_D0)))
return false;
/* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
return false;
/* Wa_14013215631 */
@@ -2529,6 +2529,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->base = base;
offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
+ drm_WARN_ON(&dev_priv->drm, offset != 0);
val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index d1245c847f1c..063929a42a42 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -2900,7 +2900,7 @@ static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index cd90a30e04d8..a96e7d028c5c 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -136,7 +136,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
- const u8 *header, *data;
+ const u8 *header;
i915_reg_t data_reg, ctrl_reg;
u32 data_mask, ctrl_mask;
@@ -145,7 +145,6 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
return ret;
header = packet.header;
- data = packet.payload;
if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
data_reg = MIPI_LP_GEN_DATA(port);
@@ -672,20 +671,6 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
intel_de_posting_read(dev_priv, port_ctrl);
}
}
-
-static void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi)
-{
- ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- panel_power_on_time = ktime_get_boottime();
- panel_power_off_duration = ktime_ms_delta(panel_power_on_time,
- intel_dsi->panel_power_off_time);
-
- if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay)
- msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration);
-}
-
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
@@ -832,8 +817,6 @@ static void bxt_dsi_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
-
intel_crtc_vblank_on(crtc_state);
}
@@ -944,13 +927,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_dsi->panel_power_off_time = ktime_get_boottime();
}
-static void intel_dsi_shutdown(struct intel_encoder *encoder)
-{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
- intel_dsi_wait_panel_power_cycle(intel_dsi);
-}
-
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
@@ -1040,7 +1016,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
- u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync;
u16 hfp_sw, hsync_sw, hbp_sw;
u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
crtc_hblank_start_sw, crtc_hblank_end_sw;
@@ -1105,7 +1081,6 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* vertical values are in terms of lines */
vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
- vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index fd556a076d05..1df74f7aa3dc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -97,8 +97,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
struct drm_i915_private *i915 = to_i915(obj->base.dev);
int ret;
- dma_resv_assert_held(dma_buf->resv);
-
if (obj->base.size < vma->vm_end - vma->vm_start)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index dfaaa8b66ac3..ffddec1d2a76 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -68,10 +68,8 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
- if (i915_vma_unset_ggtt_write(vma))
- intel_gt_flush_ggtt_writes(vma->vm->gt);
- }
+ for_each_ggtt_vma(vma, obj)
+ i915_vma_flush_writes(vma);
spin_unlock(&obj->vma.lock);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5fb459ea4294..5a687a3686bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -736,7 +736,6 @@ static int eb_reserve(struct i915_execbuffer *eb)
struct eb_vma *ev;
unsigned int pass;
int err = 0;
- bool unpinned;
/*
* We have one more buffers that we couldn't bind, which could be due to
@@ -776,7 +775,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
pin_flags |= PIN_NONBLOCK;
if (pass >= 1)
- unpinned = eb_unbind(eb, pass >= 2);
+ eb_unbind(eb, pass >= 2);
if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
@@ -2230,8 +2229,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
- drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
+ if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) {
+ drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2692,6 +2691,7 @@ static int
eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce, *child;
+ struct intel_gt *gt;
unsigned int idx;
int err;
@@ -2715,10 +2715,17 @@ eb_select_engine(struct i915_execbuffer *eb)
}
}
eb->num_batches = ce->parallel.number_children + 1;
+ gt = ce->engine->gt;
for_each_child(ce, child)
intel_context_get(child);
- intel_gt_pm_get(ce->engine->gt);
+ intel_gt_pm_get(gt);
+ /*
+ * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+ * free VMAs while execbuf ioctl is validating VMAs.
+ */
+ if (gt->info.id)
+ intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2757,7 +2764,10 @@ eb_select_engine(struct i915_execbuffer *eb)
return err;
err:
- intel_gt_pm_put(ce->engine->gt);
+ if (gt->info.id)
+ intel_gt_pm_put(to_gt(gt->i915));
+
+ intel_gt_pm_put(gt);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@@ -2770,6 +2780,12 @@ eb_put_engine(struct i915_execbuffer *eb)
struct intel_context *child;
i915_vm_put(eb->context->vm);
+ /*
+ * This works in conjunction with eb_select_engine() to prevent
+ * i915_vma_parked() from interfering while execbuf validates vmas.
+ */
+ if (eb->gt->info.id)
+ intel_gt_pm_put(to_gt(eb->gt->i915));
intel_gt_pm_put(eb->gt);
for_each_child(eb->context, child)
intel_context_put(child);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 4e7a838ab7bd..aa4d842d4c5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -1085,8 +1085,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
/* handle stolen and smem objects */
mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
mmo = mmap_offset_attach(obj, mmap_type, NULL);
- if (!mmo)
- return -ENODEV;
+ if (IS_ERR(mmo))
+ return PTR_ERR(mmo);
}
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 97ac6fb37958..ef9346ed6d0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -226,7 +226,7 @@ bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
* it, but since i915 takes the stance of always zeroing memory before
* handing it to userspace, we need to prevent this.
*/
- return IS_JSL_EHL(i915);
+ return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915));
}
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
@@ -469,7 +469,7 @@ void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
{
struct intel_frontbuffer *front;
- front = __intel_frontbuffer_get(obj);
+ front = i915_gem_object_get_frontbuffer(obj);
if (front) {
intel_frontbuffer_flush(front, origin);
intel_frontbuffer_put(front);
@@ -481,7 +481,7 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
{
struct intel_frontbuffer *front;
- front = __intel_frontbuffer_get(obj);
+ front = i915_gem_object_get_frontbuffer(obj);
if (front) {
intel_frontbuffer_invalidate(front, origin);
intel_frontbuffer_put(front);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 884a17275b3a..f607b87890dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -716,10 +716,6 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
enum i915_map_type type);
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- bool always_coherent);
-
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size);
@@ -891,4 +887,71 @@ static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *o
#endif
+/**
+ * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
+ * @obj: The object whose frontbuffer to get.
+ *
+ * Get a pointer to the object's frontbuffer if one exists. Please note that an RCU
+ * mechanism is used to handle e.g. an ongoing removal of the frontbuffer pointer.
+ *
+ * Return: pointer to the object's frontbuffer if one exists, NULL otherwise
+ */
+static inline struct intel_frontbuffer *
+i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
+{
+ struct intel_frontbuffer *front;
+
+ if (likely(!rcu_access_pointer(obj->frontbuffer)))
+ return NULL;
+
+ rcu_read_lock();
+ do {
+ front = rcu_dereference(obj->frontbuffer);
+ if (!front)
+ break;
+
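+ /*
+ * The frontbuffer may be on its way to being freed; only take a
+ * reference if its refcount has not already dropped to zero.
+ */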
+ if (unlikely(!kref_get_unless_zero(&front->ref)))
+ continue;
+
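+ /*
+ * Recheck that the object still points at this frontbuffer; if it
+ * changed while we were taking the reference, drop it and retry.
+ */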
+ if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+ break;
+
+ intel_frontbuffer_put(front);
+ } while (1);
+ rcu_read_unlock();
+
+ return front;
+}
+
+/**
+ * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
+ * @obj: The object whose frontbuffer to set.
+ * @front: The frontbuffer to set
+ *
+ * Set the object's frontbuffer pointer. If a frontbuffer is already set for the
+ * object, keep it and return its pointer to the caller. Please note that an RCU
+ * mechanism is used to handle e.g. an ongoing removal of the frontbuffer pointer.
+ * This function is protected by i915->display.fb_tracking.lock.
+ *
+ * Return: pointer to frontbuffer which was set.
+ */
+static inline struct intel_frontbuffer *
+i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
+ struct intel_frontbuffer *front)
+{
+ struct intel_frontbuffer *cur = front;
+
+ if (!front) {
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
+ } else if (rcu_access_pointer(obj->frontbuffer)) {
+ cur = rcu_dereference_protected(obj->frontbuffer, true);
+ kref_get(&cur->ref);
+ } else {
+ drm_gem_object_get(intel_bo_to_drm_bo(obj));
+ rcu_assign_pointer(obj->frontbuffer, front);
+ }
+
+ return cur;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index e72c57716bee..2292404007c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -17,6 +17,8 @@
#include "i915_selftest.h"
#include "i915_vma_resource.h"
+#include "gt/intel_gt_defines.h"
+
struct drm_i915_gem_object;
struct intel_fronbuffer;
struct intel_memory_region;
@@ -675,7 +677,7 @@ struct drm_i915_gem_object {
*/
bool dirty:1;
- u32 tlb;
+ u32 tlb[I915_MAX_GT];
} mm;
struct {
@@ -718,6 +720,9 @@ struct drm_i915_gem_object {
};
};
+#define intel_bo_to_drm_bo(bo) (&(bo)->base)
+#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)
+
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 89fc8ea6bcfc..6b6d22c19411 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,7 +7,7 @@
#include <drm/drm_cache.h>
#include "gt/intel_gt.h"
-#include "gt/intel_gt_pm.h"
+#include "gt/intel_tlb.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
@@ -193,13 +193,16 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_gt *gt = to_gt(i915);
+ struct intel_gt *gt;
+ int id;
- if (!obj->mm.tlb)
- return;
+ for_each_gt(gt, i915, id) {
+ if (!obj->mm.tlb[id])
+ return;
- intel_gt_invalidate_tlb(gt, obj->mm.tlb);
- obj->mm.tlb = 0;
+ intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
+ obj->mm.tlb[id] = 0;
+ }
}
struct sg_table *
@@ -465,21 +468,6 @@ void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
return ret;
}
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
- struct drm_i915_gem_object *obj,
- bool always_coherent)
-{
- /*
- * Wa_22016122933: always return I915_MAP_WC for MTL
- */
- if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
- return I915_MAP_WC;
- if (HAS_LLC(i915) || always_coherent)
- return I915_MAP_WB;
- else
- return I915_MAP_WC;
-}
-
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index cad4a6017f4b..8f1633c3fb93 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -19,13 +19,13 @@
#include "i915_trace.h"
/*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
+ * Move folios to appropriate lru and release the batch, decrementing the
+ * ref count of those folios.
*/
-static void check_release_pagevec(struct pagevec *pvec)
+static void check_release_folio_batch(struct folio_batch *fbatch)
{
- check_move_unevictable_pages(pvec);
- __pagevec_release(pvec);
+ check_move_unevictable_folios(fbatch);
+ __folio_batch_release(fbatch);
cond_resched();
}
@@ -33,24 +33,29 @@ void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
bool dirty, bool backup)
{
struct sgt_iter sgt_iter;
- struct pagevec pvec;
+ struct folio_batch fbatch;
+ struct folio *last = NULL;
struct page *page;
mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
for_each_sgt_page(page, sgt_iter, st) {
- if (dirty)
- set_page_dirty(page);
+ struct folio *folio = page_folio(page);
+ if (folio == last)
+ continue;
+ last = folio;
+ if (dirty)
+ folio_mark_dirty(folio);
if (backup)
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
+ if (!folio_batch_add(&fbatch, folio))
+ check_release_folio_batch(&fbatch);
}
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
+ if (fbatch.nr)
+ check_release_folio_batch(&fbatch);
sg_free_table(st);
}
@@ -63,8 +68,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
unsigned int page_count; /* restricted by sg_alloc_table */
unsigned long i;
struct scatterlist *sg;
- struct page *page;
- unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned long next_pfn = 0; /* suppress gcc warning */
gfp_t noreclaim;
int ret;
@@ -95,6 +99,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
sg = st->sgl;
st->nents = 0;
for (i = 0; i < page_count; i++) {
+ struct folio *folio;
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
0,
@@ -103,12 +108,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
do {
cond_resched();
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (!IS_ERR(page))
+ folio = shmem_read_folio_gfp(mapping, i, gfp);
+ if (!IS_ERR(folio))
break;
if (!*s) {
- ret = PTR_ERR(page);
+ ret = PTR_ERR(folio);
goto err_sg;
}
@@ -147,19 +152,21 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
if (!i ||
sg->length >= max_segment ||
- page_to_pfn(page) != last_pfn + 1) {
+ folio_pfn(folio) != next_pfn) {
if (i)
sg = sg_next(sg);
st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
+ sg_set_folio(sg, folio, folio_size(folio), 0);
} else {
- sg->length += PAGE_SIZE;
+ /* XXX: could overflow? */
+ sg->length += folio_size(folio);
}
- last_pfn = page_to_pfn(page);
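+ /*
+ * A large folio spans several page indices, so skip ahead past
+ * the pages already covered by this folio.
+ */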
+ next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+ i += folio_nr_pages(folio) - 1;
/* Check that the i965g/gm workaround works. */
- GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
+ GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
}
if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg);
@@ -455,7 +462,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
struct page *page;
void *data, *vaddr;
int err;
- char c;
+ char __maybe_unused c;
len = PAGE_SIZE - pg;
if (len > remain)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 3b094d36a0b0..1a766d8e7cce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -892,7 +892,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
} else {
resource_size_t lmem_range;
- lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;
}
@@ -974,3 +974,39 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_object_stolen_ops;
}
+
+bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
+{
+ return drm_mm_initialized(&i915->mm.stolen);
+}
+
+u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
+{
+ return i915->dsm.stolen.start;
+}
+
+u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
+{
+ return resource_size(&i915->dsm.stolen);
+}
+
+u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
+ const struct drm_mm_node *node)
+{
+ return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
+}
+
+bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
+{
+ return drm_mm_node_allocated(node);
+}
+
+u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
+{
+ return node->start;
+}
+
+u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
+{
+ return node->size;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index d5005a39d130..258381d1c054 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -12,6 +12,8 @@ struct drm_i915_private;
struct drm_mm_node;
struct drm_i915_gem_object;
+#define i915_stolen_fb drm_mm_node
+
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment);
@@ -36,4 +38,15 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
#define I915_GEM_STOLEN_BIAS SZ_128K
+bool i915_gem_stolen_initialized(const struct drm_i915_private *i915);
+u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
+u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
+
+u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
+ const struct drm_mm_node *node);
+
+bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node);
+u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node);
+u64 i915_gem_stolen_node_size(const struct drm_mm_node *node);
+
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 4a33ad2d122b..d4b918fb11ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -186,7 +186,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
/* nsecs_to_jiffies64() does not guard against overflow */
- if (NSEC_PER_SEC % HZ &&
+ if ((NSEC_PER_SEC % HZ) != 0 &&
div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
return MAX_JIFFY_OFFSET;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index df6c9a84252c..6b9f6cf50bf6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1246,8 +1246,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(count * count, &prng);
- if (!order)
- return -ENOMEM;
+ if (!order) {
+ err = -ENOMEM;
+ goto out;
+ }
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64(max - size, max_page_size);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index a93a90b15907..d8f4a10d71de 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -13,12 +13,12 @@
#include "selftests/igt_spinner.h"
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
+ struct intel_gt *gt,
bool fill)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int i, count = obj->base.size / sizeof(u32);
enum i915_map_type map_type =
- i915_coherent_map_type(i915, obj, false);
+ intel_gt_coherent_map_type(gt, obj, false);
u32 *cur;
int err = 0;
@@ -66,7 +66,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
if (err)
continue;
- err = igt_fill_check_buffer(obj, true);
+ err = igt_fill_check_buffer(obj, gt, true);
if (err)
continue;
@@ -86,7 +86,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
if (err)
continue;
- err = igt_fill_check_buffer(obj, false);
+ err = igt_fill_check_buffer(obj, gt, false);
}
i915_gem_object_put(obj);
@@ -233,7 +233,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
continue;
if (!vma) {
- err = igt_fill_check_buffer(obj, true);
+ err = igt_fill_check_buffer(obj, gt, true);
if (err)
continue;
}
@@ -276,7 +276,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
if (err)
goto out_unlock;
} else {
- err = igt_fill_check_buffer(obj, false);
+ err = igt_fill_check_buffer(obj, gt, false);
}
out_unlock:
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index a93d8f9f8bc1..72957a36a36b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1681,7 +1681,9 @@ static int igt_mmap_gpu(void *arg)
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
- if (!pte_present(*pte) || pte_none(*pte)) {
+ pte_t ptent = ptep_get(pte);
+
+ if (!pte_present(ptent) || pte_none(ptent)) {
pr_err("missing PTE:%lx\n",
(addr - (unsigned long)data) >> PAGE_SHIFT);
return -EINVAL;
@@ -1692,7 +1694,9 @@ static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
- if (pte_present(*pte) && !pte_none(*pte)) {
+ pte_t ptent = ptep_get(pte);
+
+ if (pte_present(ptent) && !pte_none(ptent)) {
pr_err("present PTE:%lx; expected to be revoked\n",
(addr - (unsigned long)data) >> PAGE_SHIFT);
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 1c82caf525c3..8fe0499308ff 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -76,7 +76,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
+ if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
cmd |= MI_INVALIDATE_ISP;
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 23857cc08eca..a4ff55aa5e55 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -39,11 +39,11 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (GRAPHICS_VER(rq->engine->i915) == 9)
+ if (GRAPHICS_VER(rq->i915) == 9)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
+ if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
dc_flush_wa = true;
}
@@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
+static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
- u32 gsi_offset = gt->uncore->gsi_offset;
+ switch (engine->id) {
+ case RCS0:
+ return GEN12_CCS_AUX_INV;
+ case BCS0:
+ return GEN12_BCS0_AUX_INV;
+ case VCS0:
+ return GEN12_VD0_AUX_INV;
+ case VCS2:
+ return GEN12_VD2_AUX_INV;
+ case VECS0:
+ return GEN12_VE0_AUX_INV;
+ case CCS0:
+ return GEN12_CCS0_AUX_INV;
+ default:
+ return INVALID_MMIO_REG;
+ }
+}
+
+static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
+{
+ i915_reg_t reg = gen12_get_aux_inv_reg(engine);
+
+ if (IS_PONTEVECCHIO(engine->i915))
+ return false;
+
+ /*
+ * So far, platforms supported by i915 that have flat CCS do not require
+ * AUX invalidation. Also check whether the engine requires it.
+ */
+ return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
+}
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+{
+ i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
+ u32 gsi_offset = engine->gt->uncore->gsi_offset;
+
+ if (!gen12_needs_ccs_aux_inv(engine))
+ return cs;
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
- *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_REGISTER_POLL |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+ *cs++ = 0;
+ *cs++ = 0;
return cs;
}
@@ -180,8 +226,8 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
- if (IS_MTL_GRAPHICS_STEP(rq->engine->i915, M, STEP_A0, STEP_B0) ||
- IS_MTL_GRAPHICS_STEP(rq->engine->i915, P, STEP_A0, STEP_B0)) {
+ if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
u32 *cs;
/* dummy PIPE_CONTROL + depth flush */
@@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
struct intel_engine_cs *engine = rq->engine;
- if (mode & EMIT_FLUSH) {
- u32 flags = 0;
+ /*
+ * On Aux CCS platforms the invalidation of the Aux
+ * table requires quiescing memory traffic beforehand
+ */
+ if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+ u32 bit_group_0 = 0;
+ u32 bit_group_1 = 0;
int err;
u32 *cs;
@@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (err)
return err;
- flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
- flags |= PIPE_CONTROL_FLUSH_L3;
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
+
+ /*
+ * When required, in MTL and beyond platforms we
+ * need to set the CCS_FLUSH bit in the pipe control
+ */
+ if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
+ bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+
+ bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+ bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */
- flags |= PIPE_CONTROL_DEPTH_STALL;
- flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
+ bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
+ bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
+ bit_group_1 |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_CS_STALL;
+ bit_group_1 |= PIPE_CONTROL_CS_STALL;
if (!HAS_3D_PIPELINE(engine->i915))
- flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (engine->class == COMPUTE_CLASS)
- flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+ bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen12_emit_pipe_control(cs,
- PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
- flags, LRC_PPHWSP_SCRATCH_ADDR);
+ cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
+ LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(rq, cs);
}
@@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
else if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
- if (!HAS_FLAT_CCS(rq->engine->i915))
- count = 8 + 4;
- else
- count = 8;
+ count = 8;
+ if (gen12_needs_ccs_aux_inv(rq->engine))
+ count += 8;
cs = intel_ring_begin(rq, count);
if (IS_ERR(cs))
@@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- if (!HAS_FLAT_CCS(rq->engine->i915)) {
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(rq->engine->gt,
- cs, GEN12_GFX_CCS_AUX_NV);
- }
+ cs = gen12_emit_aux_table_inv(engine, cs);
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
@@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
- intel_engine_mask_t aux_inv = 0;
- u32 cmd, *cs;
+ u32 cmd = 4;
+ u32 *cs;
- cmd = 4;
if (mode & EMIT_INVALIDATE) {
cmd += 2;
- if (!HAS_FLAT_CCS(rq->engine->i915) &&
- (rq->engine->class == VIDEO_DECODE_CLASS ||
- rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
- aux_inv = rq->engine->mask &
- ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
- if (aux_inv)
- cmd += 4;
- }
+ if (gen12_needs_ccs_aux_inv(rq->engine))
+ cmd += 8;
}
cs = intel_ring_begin(rq, cmd);
@@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
cmd |= MI_INVALIDATE_TLB;
if (rq->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
+
+ if (gen12_needs_ccs_aux_inv(rq->engine) &&
+ rq->engine->class == COPY_ENGINE_CLASS)
+ cmd |= MI_FLUSH_DW_CCS;
}
*cs++ = cmd;
@@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
- if (aux_inv) { /* hsdes: 1809175790 */
- if (rq->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(rq->engine->gt,
- cs, GEN12_VD0_AUX_NV);
- else
- cs = gen12_emit_aux_table_inv(rq->engine->gt,
- cs, GEN12_VE0_AUX_NV);
- }
+ cs = gen12_emit_aux_table_inv(rq->engine, cs);
if (mode & EMIT_INVALIDATE)
*cs++ = preparser_disable(false);
@@ -754,7 +798,7 @@ u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
- struct drm_i915_private *i915 = rq->engine->i915;
+ struct drm_i915_private *i915 = rq->i915;
u32 flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_TILE_CACHE_FLUSH |
@@ -775,7 +819,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
- if (!HAS_3D_PIPELINE(rq->engine->i915))
+ if (!HAS_3D_PIPELINE(rq->i915))
flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (rq->engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
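Editor's note: with gen12_emit_aux_table_inv() now taking the engine, callers no longer pick an AUX_INV register or test HAS_FLAT_CCS themselves; they only budget ring space for what the helper may emit, namely a 3-dword LRI that writes AUX_INV to the engine's *_AUX_INV register followed by a 5-dword MI_SEMAPHORE_WAIT register poll that stalls until the hardware clears the bit. That is where the "count += 8" / "cmd += 8" adjustments above come from. A small illustrative helper (not in the patch) that makes the accounting explicit:

    /*
     * Illustrative only: dword budget for the optional AUX-table
     * invalidation sequence emitted by gen12_emit_aux_table_inv().
     * gen12_needs_ccs_aux_inv() is static in gen8_engine_cs.c, so a
     * helper like this would have to live there.
     */
    static int gen12_aux_inv_dwords(struct intel_engine_cs *engine)
    {
            if (!gen12_needs_ccs_aux_inv(engine))
                    return 0;

            return 3 +   /* MI_LOAD_REGISTER_IMM(1), reg offset, AUX_INV */
                   5;    /* MI_SEMAPHORE_WAIT_TOKEN poll: header, value,
                          * reg offset, two trailing zero dwords
                          */
    }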
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index 655e5c00ddc2..867ba697aceb 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
+struct intel_engine_cs;
struct intel_gt;
struct i915_request;
@@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
static inline u32 *
-__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,
+ u32 bit_group_1, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
- batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
- batch[1] = flags1;
+ batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+ batch[1] = bit_group_1;
batch[2] = offset;
return batch + 6;
}
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *gen8_emit_pipe_control(u32 *batch,
+ u32 bit_group_1, u32 offset)
{
- return __gen8_emit_pipe_control(batch, 0, flags, offset);
+ return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
}
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,
+ u32 bit_group_1, u32 offset)
{
- return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+ return __gen8_emit_pipe_control(batch, bit_group_0,
+ bit_group_1, offset);
}
static inline u32 *
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index f948d33e5ec5..c8568e5d1147 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -37,9 +37,6 @@ static u64 gen8_pte_encode(dma_addr_t addr,
if (unlikely(flags & PTE_READ_ONLY))
pte &= ~GEN8_PAGE_RW;
- if (flags & PTE_LM)
- pte |= GEN12_PPGTT_PTE_LM;
-
/*
* For pre-gen12 platforms pat_index is the same as enum
* i915_cache_level, so the switch-case here is still valid.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0aff5bb13c53..ee15486fed0d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1333,6 +1333,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
if (!frame)
return -ENOMEM;
+ frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ee531a5c142c..b538b5c04948 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -39,7 +39,7 @@ static void dbg_poison_ce(struct intel_context *ce)
if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
- int type = i915_coherent_map_type(ce->engine->i915, obj, true);
+ int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
void *map;
if (!i915_gem_object_trylock(obj, NULL))
@@ -296,9 +296,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- struct intel_runtime_pm *rpm = engine->uncore->rpm;
-
- intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+ intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 750326434677..8a641bcf777c 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2327,6 +2327,7 @@ static u32 active_ccid(struct intel_engine_cs *engine)
static void execlists_capture(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
struct execlists_capture *cap;
if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
@@ -2375,7 +2376,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
goto err_rq;
INIT_WORK(&cap->work, execlists_capture_work);
- schedule_work(&cap->work);
+ queue_work(i915->unordered_wq, &cap->work);
return;
err_rq:
@@ -2717,7 +2718,7 @@ static int emit_pdps(struct i915_request *rq)
int err, i;
u32 *cs;
- GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+ GEM_BUG_ON(intel_vgpu_active(rq->i915));
/*
* Beware ye of the dragons, this sequence is magic!
@@ -3555,16 +3556,16 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
lrc_init_wa_ctx(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
+ execlists->submit_reg = intel_uncore_regs(uncore) +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
+ execlists->ctrl_reg = intel_uncore_regs(uncore) +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore,
RING_EXECLIST_CONTROL(engine->mmio_base),
FW_REG_WRITE);
} else {
- execlists->submit_reg = uncore->regs +
+ execlists->submit_reg = intel_uncore_regs(uncore) +
i915_mmio_reg_offset(RING_ELSP(base));
}
@@ -3680,7 +3681,7 @@ static void virtual_context_destroy(struct kref *kref)
* lock, we can delegate the free of the engine to an RCU worker.
*/
INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
- queue_rcu_work(system_wq, &ve->rcu);
+ queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu);
}
static void virtual_engine_initial_hint(struct virtual_engine *ve)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 37d0b0fe791d..40371b8a9bbb 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -818,7 +818,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
if (obj->bit_17 == NULL) {
obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) {
- drm_err(&to_i915(obj->base.dev)->drm,
+ drm_err(obj->base.dev,
"Failed to allocate memory for bit 17 record\n");
return;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 5d143e2a8db0..2bd8d98d2110 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -121,6 +121,7 @@
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
+#define MI_SEMAPHORE_REGISTER_POLL (1 << 16)
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@@ -299,6 +300,7 @@
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_CCS_FLUSH (1<<13) /* MTL+ */
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 7a008e829d4d..449f0b7fc843 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -33,6 +33,7 @@
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
+#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
@@ -50,8 +51,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
- mutex_init(&gt->tlb.invalidate_lock);
- seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
+ intel_gt_init_tlb(gt);
intel_gt_pm_init_early(gt);
intel_wopcm_init_early(&gt->wopcm);
@@ -179,7 +179,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
HSW_MI_PREDICATE_RESULT_2,
- IS_HSW_GT3(i915) ?
+ IS_HASWELL_GT3(i915) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
@@ -466,7 +466,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
obj = i915_gem_object_create_lmem(i915, size,
I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_GPU_ONLY);
- if (IS_ERR(obj))
+ if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
@@ -846,7 +846,7 @@ void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
- mutex_destroy(&gt->tlb.invalidate_lock);
+ intel_gt_fini_tlb(gt);
intel_engines_free(gt);
}
}
@@ -887,7 +887,7 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- struct intel_gt *gt = &i915->gt0;
+ struct intel_gt *gt = to_gt(i915);
const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
@@ -904,7 +904,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
*/
gt->i915 = i915;
gt->name = "Primary GT";
- gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+ gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;
gt_dbg(gt, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
@@ -1004,136 +1004,18 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
-/*
- * HW architecture suggest typical invalidation time at 40us,
- * with pessimistic cases up to 100us and a recommendation to
- * cap at 1ms. We go a bit higher just in case.
- */
-#define TLB_INVAL_TIMEOUT_US 100
-#define TLB_INVAL_TIMEOUT_MS 4
-
-/*
- * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
- * but are now considered MCR registers. Since they exist within a GAM range,
- * the primary instance of the register rolls up the status from each unit.
- */
-static int wait_for_invalidate(struct intel_engine_cs *engine)
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+ struct drm_i915_gem_object *obj,
+ bool always_coherent)
{
- if (engine->tlb_inv.mcr)
- return intel_gt_mcr_wait_for_reg(engine->gt,
- engine->tlb_inv.reg.mcr_reg,
- engine->tlb_inv.done,
- 0,
- TLB_INVAL_TIMEOUT_US,
- TLB_INVAL_TIMEOUT_MS);
- else
- return __intel_wait_for_register_fw(engine->gt->uncore,
- engine->tlb_inv.reg.reg,
- engine->tlb_inv.done,
- 0,
- TLB_INVAL_TIMEOUT_US,
- TLB_INVAL_TIMEOUT_MS,
- NULL);
-}
-
-static void mmio_invalidate_full(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- intel_engine_mask_t awake, tmp;
- enum intel_engine_id id;
- unsigned long flags;
-
- if (GRAPHICS_VER(i915) < 8)
- return;
-
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- intel_gt_mcr_lock(gt, &flags);
- spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
-
- awake = 0;
- for_each_engine(engine, gt, id) {
- if (!intel_engine_pm_is_awake(engine))
- continue;
-
- if (engine->tlb_inv.mcr)
- intel_gt_mcr_multicast_write_fw(gt,
- engine->tlb_inv.reg.mcr_reg,
- engine->tlb_inv.request);
- else
- intel_uncore_write_fw(uncore,
- engine->tlb_inv.reg.reg,
- engine->tlb_inv.request);
-
- awake |= engine->mask;
- }
-
- GT_TRACE(gt, "invalidated engines %08x\n", awake);
-
- /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
- if (awake &&
- (IS_TIGERLAKE(i915) ||
- IS_DG1(i915) ||
- IS_ROCKETLAKE(i915) ||
- IS_ALDERLAKE_S(i915) ||
- IS_ALDERLAKE_P(i915)))
- intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
-
- spin_unlock(&uncore->lock);
- intel_gt_mcr_unlock(gt, flags);
-
- for_each_engine_masked(engine, gt, awake, tmp) {
- if (wait_for_invalidate(engine))
- gt_err_ratelimited(gt,
- "%s TLB invalidation did not complete in %ums!\n",
- engine->name, TLB_INVAL_TIMEOUT_MS);
- }
-
/*
- * Use delayed put since a) we mostly expect a flurry of TLB
- * invalidations so it is good to avoid paying the forcewake cost and
- * b) it works around a bug in Icelake which cannot cope with too rapid
- * transitions.
+ * Wa_22016122933: always return I915_MAP_WC for Media
+ * version 13.0 when the object is on the Media GT
*/
- intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
-}
-
-static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
-{
- u32 cur = intel_gt_tlb_seqno(gt);
-
- /* Only skip if a *full* TLB invalidate barrier has passed */
- return (s32)(cur - ALIGN(seqno, 2)) > 0;
-}
-
-void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
-{
- intel_wakeref_t wakeref;
-
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
- return;
-
- if (intel_gt_is_wedged(gt))
- return;
-
- if (tlb_seqno_passed(gt, seqno))
- return;
-
- with_intel_gt_pm_if_awake(gt, wakeref) {
- mutex_lock(&gt->tlb.invalidate_lock);
- if (tlb_seqno_passed(gt, seqno))
- goto unlock;
-
- mmio_invalidate_full(gt);
-
- write_seqcount_invalidate(&gt->tlb.seqno);
-unlock:
- mutex_unlock(&gt->tlb.invalidate_lock);
- }
+ if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
+ return I915_MAP_WC;
+ if (HAS_LLC(gt->i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_tlb.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index d2f4fbde5f9f..6c34547b58b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_GT__
#define __INTEL_GT__
+#include "i915_drv.h"
#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"
@@ -24,6 +25,11 @@ static inline bool gt_is_root(struct intel_gt *gt)
return !gt->info.id;
}
+static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
+{
+ return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
+}
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -107,16 +113,8 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
-static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
-{
- return seqprop_sequence(&gt->tlb.seqno);
-}
-
-static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
-{
- return intel_gt_tlb_seqno(gt) | 1;
-}
-
-void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno);
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+ struct drm_i915_gem_object *obj,
+ bool always_coherent);
#endif /* __INTEL_GT_H__ */
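Editor's note: intel_gt_coherent_map_type() replaces the per-device i915_coherent_map_type() so the decision can account for the GT, in particular the Wa_22016122933 media-GT case that must use WC. The conversions in intel_gtt.c, intel_ring.c and intel_lrc.c later in this diff all follow the same pattern; a minimal sketch of that pattern (map_object_for_gt() is a made-up name):

    static void *map_object_for_gt(struct intel_gt *gt,
                                   struct drm_i915_gem_object *obj,
                                   bool always_coherent)
    {
            enum i915_map_type type =
                    intel_gt_coherent_map_type(gt, obj, always_coherent);

            /* lmem and the Wa_22016122933 media GT come back as I915_MAP_WC */
            return i915_gem_object_pin_map_unlocked(obj, type);
    }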
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index cadfd85785b1..86b5a9ba323d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -88,10 +88,11 @@ static void pool_free_work(struct work_struct *wrk)
{
struct intel_gt_buffer_pool *pool =
container_of(wrk, typeof(*pool), work.work);
+ struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
if (pool_free_older_than(pool, HZ))
- schedule_delayed_work(&pool->work,
- round_jiffies_up_relative(HZ));
+ queue_delayed_work(gt->i915->unordered_wq, &pool->work,
+ round_jiffies_up_relative(HZ));
}
static void pool_retire(struct i915_active *ref)
@@ -99,6 +100,7 @@ static void pool_retire(struct i915_active *ref)
struct intel_gt_buffer_pool_node *node =
container_of(ref, typeof(*node), active);
struct intel_gt_buffer_pool *pool = node->pool;
+ struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
@@ -116,8 +118,8 @@ static void pool_retire(struct i915_active *ref)
WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
spin_unlock_irqrestore(&pool->lock, flags);
- schedule_delayed_work(&pool->work,
- round_jiffies_up_relative(HZ));
+ queue_delayed_work(gt->i915->unordered_wq, &pool->work,
+ round_jiffies_up_relative(HZ));
}
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_defines.h b/drivers/gpu/drm/i915/gt/intel_gt_defines.h
new file mode 100644
index 000000000000..5017788bac8f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_defines.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_DEFINES__
+#define __INTEL_GT_DEFINES__
+
+#define I915_MAX_GT 2
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 8f888d36f16d..77fb57223465 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -31,7 +31,7 @@ static u32
gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 timeout_ts;
u32 ident;
@@ -148,7 +148,7 @@ gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = intel_uncore_regs(gt->uncore);
unsigned long intr_dw;
unsigned int bit;
@@ -183,7 +183,7 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 dw;
lockdep_assert_held(gt->irq_lock);
@@ -376,7 +376,7 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
gt->i915->l3_parity.which_slice |= 1 << 0;
- schedule_work(&gt->i915->l3_parity.error_work);
+ queue_work(gt->i915->unordered_wq, &gt->i915->l3_parity.error_work);
}
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
@@ -404,7 +404,7 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 iir;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index c2e69bafd02b..5a942af0a14e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -137,7 +137,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
- intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 718cb2c80f79..2cdfb2f713d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -332,9 +332,11 @@
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x4180)
-#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
-#define GEN12_VD0_AUX_NV _MMIO(0x4218)
-#define GEN12_VD1_AUX_NV _MMIO(0x4228)
+
+#define GEN12_CCS_AUX_INV _MMIO(0x4208)
+#define GEN12_VD0_AUX_INV _MMIO(0x4218)
+#define GEN12_VE0_AUX_INV _MMIO(0x4238)
+#define GEN12_BCS0_AUX_INV _MMIO(0x4248)
#define GEN8_RTCR _MMIO(0x4260)
#define GEN8_M1TCR _MMIO(0x4264)
@@ -342,14 +344,12 @@
#define GEN8_BTCR _MMIO(0x426c)
#define GEN8_VTCR _MMIO(0x4270)
-#define GEN12_VD2_AUX_NV _MMIO(0x4298)
-#define GEN12_VD3_AUX_NV _MMIO(0x42a8)
-#define GEN12_VE0_AUX_NV _MMIO(0x4238)
-
#define BLT_HWS_PGA_GEN7 _MMIO(0x4280)
-#define GEN12_VE1_AUX_NV _MMIO(0x42b8)
+#define GEN12_VD2_AUX_INV _MMIO(0x4298)
+#define GEN12_CCS0_AUX_INV _MMIO(0x42c8)
#define AUX_INV REG_BIT(0)
+
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380)
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 1dfd01668c79..d1a382dfaa1d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -116,7 +116,7 @@ void intel_engine_add_retire(struct intel_engine_cs *engine,
GEM_BUG_ON(intel_engine_is_virtual(engine));
if (add_retire(engine, tl))
- schedule_work(&engine->retire_work);
+ queue_work(engine->i915->unordered_wq, &engine->retire_work);
}
void intel_engine_init_retire(struct intel_engine_cs *engine)
@@ -207,8 +207,8 @@ static void retire_work_handler(struct work_struct *work)
struct intel_gt *gt =
container_of(work, typeof(*gt), requests.retire_work.work);
- schedule_delayed_work(&gt->requests.retire_work,
- round_jiffies_up_relative(HZ));
+ queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
intel_gt_retire_requests(gt);
}
@@ -224,8 +224,8 @@ void intel_gt_park_requests(struct intel_gt *gt)
void intel_gt_unpark_requests(struct intel_gt *gt)
{
- schedule_delayed_work(&gt->requests.retire_work,
- round_jiffies_up_relative(HZ));
+ queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
}
void intel_gt_fini_requests(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index ee2b44f896a2..f0dea54880af 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -701,6 +701,80 @@ static const struct attribute *media_perf_power_attrs[] = {
};
static ssize_t
+rps_up_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_rps *rps = &gt->rps;
+
+ return sysfs_emit(buf, "%u\n", intel_rps_get_up_threshold(rps));
+}
+
+static ssize_t
+rps_up_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_rps *rps = &gt->rps;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = intel_rps_set_up_threshold(rps, val);
+
+ return ret == 0 ? count : ret;
+}
+
+static struct kobj_attribute rps_up_threshold_pct =
+ __ATTR(rps_up_threshold_pct,
+ 0664,
+ rps_up_threshold_pct_show,
+ rps_up_threshold_pct_store);
+
+static ssize_t
+rps_down_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_rps *rps = &gt->rps;
+
+ return sysfs_emit(buf, "%u\n", intel_rps_get_down_threshold(rps));
+}
+
+static ssize_t
+rps_down_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_rps *rps = &gt->rps;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = intel_rps_set_down_threshold(rps, val);
+
+ return ret == 0 ? count : ret;
+}
+
+static struct kobj_attribute rps_down_threshold_pct =
+ __ATTR(rps_down_threshold_pct,
+ 0664,
+ rps_down_threshold_pct_show,
+ rps_down_threshold_pct_store);
+
+static const struct attribute * const gen6_gt_rps_attrs[] = {
+ &rps_up_threshold_pct.attr,
+ &rps_down_threshold_pct.attr,
+ NULL
+};
+
+static ssize_t
default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
@@ -722,9 +796,37 @@ default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, cha
static struct kobj_attribute default_max_freq_mhz =
__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
+static ssize_t
+default_rps_up_threshold_pct_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.rps_up_threshold);
+}
+
+static struct kobj_attribute default_rps_up_threshold_pct =
+__ATTR(rps_up_threshold_pct, 0444, default_rps_up_threshold_pct_show, NULL);
+
+static ssize_t
+default_rps_down_threshold_pct_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.rps_down_threshold);
+}
+
+static struct kobj_attribute default_rps_down_threshold_pct =
+__ATTR(rps_down_threshold_pct, 0444, default_rps_down_threshold_pct_show, NULL);
+
static const struct attribute * const rps_defaults_attrs[] = {
&default_min_freq_mhz.attr,
&default_max_freq_mhz.attr,
+ &default_rps_up_threshold_pct.attr,
+ &default_rps_down_threshold_pct.attr,
NULL
};
@@ -752,6 +854,12 @@ static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj)
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
ret = sysfs_create_file(kobj, vlv_attr);
+ if (is_object_gt(kobj) && !intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_files(kobj, gen6_gt_rps_attrs);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
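Editor's note: the two new writable attributes live in the per-GT sysfs directory and are only registered when host RPS (not GuC SLPC) drives frequency selection. A userspace sketch, not part of the patch; the /sys/class/drm/card0/gt/gt0/ location is an assumption, since the card and GT index vary per system, but the attribute names match those added above.

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/class/drm/card0/gt/gt0/rps_up_threshold_pct";
            FILE *f = fopen(path, "r+");
            unsigned int pct;

            if (!f)
                    return 1;

            if (fscanf(f, "%u", &pct) == 1)
                    printf("current up threshold: %u%%\n", pct);

            rewind(f);
            fprintf(f, "90\n");  /* values above 100 are rejected (-EINVAL) */

            fclose(f);
            return 0;
    }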
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index f08c2556aa25..def7dd0eb6f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -83,6 +83,9 @@ enum intel_submission_method {
struct gt_defaults {
u32 min_freq;
u32 max_freq;
+
+ u8 rps_up_threshold;
+ u8 rps_down_threshold;
};
enum intel_gt_type {
@@ -306,4 +309,6 @@ enum intel_gt_scratch_field {
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
};
+#define intel_gt_support_legacy_fencing(gt) ((gt)->ggtt->num_fences > 0)
+
#endif /* __INTEL_GT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 2f6a9be0ffe6..13944a14ea2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -89,7 +89,7 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
enum i915_map_type type;
void *vaddr;
- type = i915_coherent_map_type(vm->i915, obj, true);
+ type = intel_gt_coherent_map_type(vm->gt, obj, true);
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -103,7 +103,7 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
enum i915_map_type type;
void *vaddr;
- type = i915_coherent_map_type(vm->i915, obj, true);
+ type = intel_gt_coherent_map_type(vm->gt, obj, true);
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -670,7 +670,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a4ec20aaafe2..957d0aeb0c02 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1092,8 +1092,16 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
obj = i915_gem_object_create_lmem(engine->i915, context_size,
I915_BO_ALLOC_PM_VOLATILE);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ /*
+ * Wa_22016122933: For Media version 13.0, all Media GT shared
+ * memory needs to be mapped as WC on CPU side and UC (PAT
+ * index 2) on GPU side.
+ */
+ if (intel_gt_needs_wa_22016122933(engine->gt))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+ }
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1184,9 +1192,9 @@ lrc_pre_pin(struct intel_context *ce,
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
*vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915,
- ce->state->obj,
- false) |
+ intel_gt_coherent_map_type(ce->engine->gt,
+ ce->state->obj,
+ false) |
I915_MAP_OVERRIDE);
return PTR_ERR_OR_ZERO(*vaddr);
@@ -1364,10 +1372,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
IS_DG2_G11(ce->engine->i915))
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
- /* hsdes: 1809175790 */
- if (!HAS_FLAT_CCS(ce->engine->i915))
- cs = gen12_emit_aux_table_inv(ce->engine->gt,
- cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine, cs);
/* Wa_16014892111 */
if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
@@ -1392,17 +1397,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
0);
- /* hsdes: 1809175790 */
- if (!HAS_FLAT_CCS(ce->engine->i915)) {
- if (ce->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(ce->engine->gt,
- cs, GEN12_VD0_AUX_NV);
- else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
- cs = gen12_emit_aux_table_inv(ce->engine->gt,
- cs, GEN12_VE0_AUX_NV);
- }
-
- return cs;
+ return gen12_emit_aux_table_inv(ce->engine, cs);
}
static void
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 6023288b0e2d..576e5ef0289b 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -366,7 +366,7 @@ static int emit_pte(struct i915_request *rq,
u64 offset,
int length)
{
- bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
+ bool has_64K_pages = HAS_64K_PAGES(rq->i915);
const u64 encode = rq->context->vm->pte_encode(0, pat_index,
is_lmem ? PTE_LM : 0);
struct intel_ring *ring = rq->ring;
@@ -375,7 +375,7 @@ static int emit_pte(struct i915_request *rq,
u32 page_size;
u32 *hdr, *cs;
- GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
+ GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);
page_size = I915_GTT_PAGE_SIZE;
dword_length = 0x400;
@@ -531,7 +531,7 @@ static int emit_copy_ccs(struct i915_request *rq,
u32 dst_offset, u8 dst_access,
u32 src_offset, u8 src_access, int size)
{
- struct drm_i915_private *i915 = rq->engine->i915;
+ struct drm_i915_private *i915 = rq->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
u32 num_ccs_blks;
u32 *cs;
@@ -581,7 +581,7 @@ static int emit_copy_ccs(struct i915_request *rq,
static int emit_copy(struct i915_request *rq,
u32 dst_offset, u32 src_offset, int size)
{
- const int ver = GRAPHICS_VER(rq->engine->i915);
+ const int ver = GRAPHICS_VER(rq->i915);
u32 instance = rq->engine->instance;
u32 *cs;
@@ -917,7 +917,7 @@ out_ce:
static int emit_clear(struct i915_request *rq, u32 offset, int size,
u32 value, bool is_lmem)
{
- struct drm_i915_private *i915 = rq->engine->i915;
+ struct drm_i915_private *i915 = rq->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
const int ver = GRAPHICS_VER(i915);
int ring_sz;
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 436756bfbb1a..d07a4f97b943 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
+#include "intel_gt.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"
@@ -210,8 +211,7 @@ void ppgtt_unbind_vma(struct i915_address_space *vm,
return;
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
- if (vma_res->tlb)
- vma_invalidate_tlb(vm, vma_res->tlb);
+ vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 2a3217e2890f..f8512aee58a8 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -220,7 +220,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
- lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 195ff72d7a14..cc6bd21a3e51 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -35,9 +35,6 @@
#define RESET_MAX_RETRIES 3
-/* XXX How to handle concurrent GGTT updates using tiling registers? */
-#define RESET_UNDER_STOP_MACHINE 0
-
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -1625,7 +1622,7 @@ void __intel_init_wedge(struct intel_wedge_me *w,
w->name = name;
INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
- schedule_delayed_work(&w->work, timeout);
+ queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
}
void __intel_fini_wedge(struct intel_wedge_me *w)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index fb99143be98e..59da4b7bd262 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -13,6 +13,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
+#include "intel_gt.h"
#include "intel_timeline.h"
unsigned int intel_ring_update_space(struct intel_ring *ring)
@@ -56,7 +57,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
} else {
- int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+ int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
addr = i915_gem_object_pin_map(vma->obj, type);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 3fd795c3263f..92085ffd23de 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -805,7 +805,7 @@ static int mi_set_context(struct i915_request *rq,
static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
- u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
+ u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index e68a99205599..092542f53aad 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -16,7 +16,9 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
+#include "intel_gt_pm.h"
#include "intel_gt_pm_irq.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
@@ -73,13 +75,14 @@ static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
static void rps_timer(struct timer_list *t)
{
struct intel_rps *rps = from_timer(rps, t, timer);
+ struct intel_gt *gt = rps_to_gt(rps);
struct intel_engine_cs *engine;
ktime_t dt, last, timestamp;
enum intel_engine_id id;
s64 max_busy[3] = {};
timestamp = 0;
- for_each_engine(engine, rps_to_gt(rps), id) {
+ for_each_engine(engine, gt, id) {
s64 busy;
int i;
@@ -123,7 +126,7 @@ static void rps_timer(struct timer_list *t)
busy += div_u64(max_busy[i], 1 << i);
}
- GT_TRACE(rps_to_gt(rps),
+ GT_TRACE(gt,
"busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
busy, (int)div64_u64(100 * busy, dt),
max_busy[0], max_busy[1], max_busy[2],
@@ -133,12 +136,12 @@ static void rps_timer(struct timer_list *t)
rps->cur_freq < rps->max_freq_softlimit) {
rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
rps->pm_interval = 1;
- schedule_work(&rps->work);
+ queue_work(gt->i915->unordered_wq, &rps->work);
} else if (100 * busy < rps->power.down_threshold * dt &&
rps->cur_freq > rps->min_freq_softlimit) {
rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
rps->pm_interval = 1;
- schedule_work(&rps->work);
+ queue_work(gt->i915->unordered_wq, &rps->work);
} else {
rps->last_adj = 0;
}
@@ -671,7 +674,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
{
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;
- u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
lockdep_assert_held(&rps->power.mutex);
@@ -679,9 +681,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (new_power == rps->power.mode)
return;
- threshold_up = 95;
- threshold_down = 85;
-
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
@@ -708,17 +707,22 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
GT_TRACE(gt,
"changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
- new_power, threshold_up, ei_up, threshold_down, ei_down);
+ new_power,
+ rps->power.up_threshold, ei_up,
+ rps->power.down_threshold, ei_down);
set(uncore, GEN6_RP_UP_EI,
intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
set(uncore, GEN6_RP_UP_THRESHOLD,
- intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));
+ intel_gt_ns_to_pm_interval(gt,
+ ei_up * rps->power.up_threshold * 10));
set(uncore, GEN6_RP_DOWN_EI,
intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
set(uncore, GEN6_RP_DOWN_THRESHOLD,
- intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
+ intel_gt_ns_to_pm_interval(gt,
+ ei_down *
+ rps->power.down_threshold * 10));
set(uncore, GEN6_RP_CONTROL,
(GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
@@ -730,8 +734,6 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
skip_hw_write:
rps->power.mode = new_power;
- rps->power.up_threshold = threshold_up;
- rps->power.down_threshold = threshold_down;
}
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
@@ -973,7 +975,7 @@ static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
}
mutex_unlock(&rps->lock);
if (boost)
- schedule_work(&rps->work);
+ queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
return 0;
}
@@ -1025,7 +1027,8 @@ void intel_rps_boost(struct i915_request *rq)
if (!atomic_fetch_inc(&slpc->num_waiters)) {
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
- schedule_work(&slpc->boost_work);
+ queue_work(rps_to_gt(rps)->i915->unordered_wq,
+ &slpc->boost_work);
}
return;
@@ -1041,7 +1044,7 @@ void intel_rps_boost(struct i915_request *rq)
rq->fence.context, rq->fence.seqno);
if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
- schedule_work(&rps->work);
+ queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
}
@@ -1557,10 +1560,12 @@ void intel_rps_enable(struct intel_rps *rps)
return;
GT_TRACE(rps_to_gt(rps),
- "min:%x, max:%x, freq:[%d, %d]\n",
+ "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n",
rps->min_freq, rps->max_freq,
intel_gpu_freq(rps, rps->min_freq),
- intel_gpu_freq(rps, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->power.up_threshold,
+ rps->power.down_threshold);
GEM_BUG_ON(rps->max_freq < rps->min_freq);
GEM_BUG_ON(rps->idle_freq > rps->max_freq);
@@ -1900,7 +1905,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
- schedule_work(&rps->work);
+ queue_work(gt->i915->unordered_wq, &rps->work);
}
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1917,7 +1922,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
- schedule_work(&rps->work);
+ queue_work(gt->i915->unordered_wq, &rps->work);
spin_unlock(gt->irq_lock);
}
@@ -2013,6 +2018,12 @@ void intel_rps_init(struct intel_rps *rps)
}
}
+ /* Set default thresholds in % */
+ rps->power.up_threshold = 95;
+ rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold;
+ rps->power.down_threshold = 85;
+ rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold;
+
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
rps->idle_freq = rps->min_freq;
@@ -2567,6 +2578,58 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
return set_min_freq(rps, val);
}
+u8 intel_rps_get_up_threshold(struct intel_rps *rps)
+{
+ return rps->power.up_threshold;
+}
+
+static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val)
+{
+ int ret;
+
+ if (val > 100)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&rps->lock);
+ if (ret)
+ return ret;
+
+ if (*threshold == val)
+ goto out_unlock;
+
+ *threshold = val;
+
+ /* Force reset. */
+ rps->last_freq = -1;
+ mutex_lock(&rps->power.mutex);
+ rps->power.mode = -1;
+ mutex_unlock(&rps->power.mutex);
+
+ intel_rps_set(rps, clamp(rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit));
+
+out_unlock:
+ mutex_unlock(&rps->lock);
+
+ return ret;
+}
+
+int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold)
+{
+ return rps_set_threshold(rps, &rps->power.up_threshold, threshold);
+}
+
+u8 intel_rps_get_down_threshold(struct intel_rps *rps)
+{
+ return rps->power.down_threshold;
+}
+
+int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold)
+{
+ return rps_set_threshold(rps, &rps->power.down_threshold, threshold);
+}
+
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index a3fa987aa91f..92fb01f5a452 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,6 +37,10 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
int intel_gpu_freq(struct intel_rps *rps, int val);
int intel_freq_opcode(struct intel_rps *rps, int val);
+u8 intel_rps_get_up_threshold(struct intel_rps *rps);
+int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold);
+u8 intel_rps_get_down_threshold(struct intel_rps *rps);
+int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
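Editor's note: these getter/setter pairs are what the new sysfs handlers call into. rps_set_threshold() in intel_rps.c rejects values above 100, treats an unchanged value as a no-op, and otherwise resets the cached power mode and re-applies the current frequency so the GEN6_RP_*_THRESHOLD registers pick up the new percentage immediately. A hedged kernel-side sketch of a caller (try_more_aggressive_upclock() is invented for illustration):

    /* Illustrative only: bump the up threshold, then restore it. */
    static int try_more_aggressive_upclock(struct intel_rps *rps)
    {
            u8 old = intel_rps_get_up_threshold(rps);
            int err = intel_rps_set_up_threshold(rps, 90);

            if (err)  /* -EINVAL for values > 100, or interrupted lock */
                    return err;

            /* ... run the workload of interest ... */

            return intel_rps_set_up_threshold(rps, old);
    }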
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
index e8f3d18c12b8..8c1dbcbcbc4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_sa_media.c
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -29,7 +29,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
* Standalone media shares the general MMIO space with the primary
* GT. We'll re-use the primary GT's mapping.
*/
- uncore->regs = i915->uncore.regs;
+ uncore->regs = intel_uncore_regs(&i915->uncore);
if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
return -EIO;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 1141f875f5bd..f602895f6d0d 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -302,7 +302,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
u8 eu_en;
u8 s_en;
- if (IS_JSL_EHL(gt->i915))
+ if (IS_JASPERLAKE(gt->i915) || IS_ELKHARTLAKE(gt->i915))
intel_sseu_set_info(sseu, 1, 4, 8);
else
intel_sseu_set_info(sseu, 1, 8, 8);
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
new file mode 100644
index 000000000000..139608c30d97
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_mcr.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
+#include "intel_gt_regs.h"
+#include "intel_tlb.h"
+
+/*
+ * HW architecture suggest typical invalidation time at 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+#define TLB_INVAL_TIMEOUT_US 100
+#define TLB_INVAL_TIMEOUT_MS 4
+
+/*
+ * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
+ * but are now considered MCR registers. Since they exist within a GAM range,
+ * the primary instance of the register rolls up the status from each unit.
+ */
+static int wait_for_invalidate(struct intel_engine_cs *engine)
+{
+ if (engine->tlb_inv.mcr)
+ return intel_gt_mcr_wait_for_reg(engine->gt,
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.done,
+ 0,
+ TLB_INVAL_TIMEOUT_US,
+ TLB_INVAL_TIMEOUT_MS);
+ else
+ return __intel_wait_for_register_fw(engine->gt->uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.done,
+ 0,
+ TLB_INVAL_TIMEOUT_US,
+ TLB_INVAL_TIMEOUT_MS,
+ NULL);
+}
+
+static void mmio_invalidate_full(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ intel_engine_mask_t awake, tmp;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ if (GRAPHICS_VER(i915) < 8)
+ return;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
+
+ awake = 0;
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_pm_is_awake(engine))
+ continue;
+
+ if (engine->tlb_inv.mcr)
+ intel_gt_mcr_multicast_write_fw(gt,
+ engine->tlb_inv.reg.mcr_reg,
+ engine->tlb_inv.request);
+ else
+ intel_uncore_write_fw(uncore,
+ engine->tlb_inv.reg.reg,
+ engine->tlb_inv.request);
+
+ awake |= engine->mask;
+ }
+
+ GT_TRACE(gt, "invalidated engines %08x\n", awake);
+
+ /* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
+ if (awake &&
+ (IS_TIGERLAKE(i915) ||
+ IS_DG1(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) ||
+ IS_ALDERLAKE_P(i915)))
+ intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
+
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
+
+ for_each_engine_masked(engine, gt, awake, tmp) {
+ if (wait_for_invalidate(engine))
+ gt_err_ratelimited(gt,
+ "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, TLB_INVAL_TIMEOUT_MS);
+ }
+
+ /*
+ * Use delayed put since a) we mostly expect a flurry of TLB
+ * invalidations so it is good to avoid paying the forcewake cost and
+ * b) it works around a bug in Icelake which cannot cope with too rapid
+ * transitions.
+ */
+ intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+}
+
+static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
+{
+ u32 cur = intel_gt_tlb_seqno(gt);
+
+ /* Only skip if a *full* TLB invalidate barrier has passed */
+ return (s32)(cur - ALIGN(seqno, 2)) > 0;
+}
+
+void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
+{
+ intel_wakeref_t wakeref;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (intel_gt_is_wedged(gt))
+ return;
+
+ if (tlb_seqno_passed(gt, seqno))
+ return;
+
+ with_intel_gt_pm_if_awake(gt, wakeref) {
+ mutex_lock(&gt->tlb.invalidate_lock);
+ if (tlb_seqno_passed(gt, seqno))
+ goto unlock;
+
+ mmio_invalidate_full(gt);
+
+ write_seqcount_invalidate(&gt->tlb.seqno);
+unlock:
+ mutex_unlock(&gt->tlb.invalidate_lock);
+ }
+}
+
+void intel_gt_init_tlb(struct intel_gt *gt)
+{
+ mutex_init(&gt->tlb.invalidate_lock);
+ seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
+}
+
+void intel_gt_fini_tlb(struct intel_gt *gt)
+{
+ mutex_destroy(&gt->tlb.invalidate_lock);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_tlb.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h
new file mode 100644
index 000000000000..337327af92ac
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef INTEL_TLB_H
+#define INTEL_TLB_H
+
+#include <linux/seqlock.h>
+#include <linux/types.h>
+
+#include "intel_gt_types.h"
+
+void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno);
+
+void intel_gt_init_tlb(struct intel_gt *gt);
+void intel_gt_fini_tlb(struct intel_gt *gt);
+
+static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
+{
+ return seqprop_sequence(&gt->tlb.seqno);
+}
+
+static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
+{
+ return intel_gt_tlb_seqno(gt) | 1;
+}
+
+#endif /* INTEL_TLB_H */
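A rough, self-contained sketch of the seqno arithmetic used by tlb_seqno_passed() and intel_gt_next_invalidate_tlb_full() above (plain C with illustrative names such as gt_seqno and next_full_invalidate; this is not i915 code, only an illustration of the even/odd scheme): the GT counter advances by 2 only when a full invalidation completes, so it stays even; callers record an odd marker (counter | 1) when they zap PTEs; an invalidation may be skipped only once the counter is strictly greater than the marker rounded up to the next even value.

#include <stdint.h>
#include <stdio.h>

#define ALIGN2(x) (((x) + 1u) & ~1u)		/* round up to even, like ALIGN(x, 2) */

static uint32_t gt_seqno;			/* stands in for gt->tlb.seqno; always even */

static uint32_t next_full_invalidate(void)	/* cf. intel_gt_next_invalidate_tlb_full() */
{
	return gt_seqno | 1;			/* odd marker: "flush needed after this point" */
}

static int seqno_passed(uint32_t marker)	/* cf. tlb_seqno_passed() */
{
	return (int32_t)(gt_seqno - ALIGN2(marker)) > 0;
}

static void full_invalidate(uint32_t marker)	/* cf. intel_gt_invalidate_tlb_full() */
{
	if (seqno_passed(marker))
		return;				/* a later full flush already covered this marker */
	/* the MMIO invalidation would happen here */
	gt_seqno += 2;				/* write_seqcount_invalidate() bumps by 2 */
}

int main(void)
{
	uint32_t a = next_full_invalidate();	/* counter 0 -> marker 1 */
	uint32_t b = next_full_invalidate();	/* also marker 1 */

	full_invalidate(a);			/* counter -> 2 */
	printf("b passed: %d\n", seqno_passed(b));	/* 2 > ALIGN2(1) == 2 is false -> 0 */

	full_invalidate(b);			/* counter -> 4 */
	printf("b passed: %d\n", seqno_passed(b));	/* 4 > 2 -> 1, marker b is now covered */
	return 0;
}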
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 4d2dece96011..3ae0dbd39eaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -123,6 +123,22 @@ static void wa_init_finish(struct i915_wa_list *wal)
wal->wa_count, wal->name, wal->engine_name);
}
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(uncore,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
+
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
unsigned int addr = i915_mmio_reg_offset(wa->reg);
@@ -225,13 +241,13 @@ static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
- wa_add(wal, reg, clear, set, clear, false);
+ wa_add(wal, reg, clear, set, clear | set, false);
}
static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
- wa_mcr_add(wal, reg, clear, set, clear, false);
+ wa_mcr_add(wal, reg, clear, set, clear | set, false);
}
static void
@@ -404,7 +420,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -584,7 +600,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -621,10 +637,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* Wa_1406697149 (WaDisableBankHangMode:icl) */
- wa_write(wal,
- GEN8_L3CNTLREG,
- intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
- GEN8_ERRDETBCTRL);
+ wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
@@ -653,7 +666,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
- 0, /* write-only register; skip validation */
+ 0,
0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
@@ -670,38 +683,8 @@ static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
- wa_mcr_add(wal,
- XEHP_FF_MODE2,
- FF_MODE2_TDS_TIMER_MASK,
- FF_MODE2_TDS_TIMER_128,
- 0, false);
-}
-
-/*
- * These settings aren't actually workarounds, but general tuning settings that
- * need to be programmed on several platforms.
- */
-static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
- struct i915_wa_list *wal)
-{
- /*
- * Although some platforms refer to it as Wa_1604555607, we need to
- * program it even on those that don't explicitly list that
- * workaround.
- *
- * Note that the programming of this register is further modified
- * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
- * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
- * value when read. The default value for this register is zero for all
- * fields and there are no bit masks. So instead of doing a RMW we
- * should just write TDS timer value. For the same reason read
- * verification is ignored.
- */
- wa_add(wal,
- GEN12_FF_MODE2,
- FF_MODE2_TDS_TIMER_MASK,
- FF_MODE2_TDS_TIMER_128,
- 0, false);
+ wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128);
}
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -709,8 +692,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
{
struct drm_i915_private *i915 = engine->i915;
- gen12_ctx_gt_tuning_init(engine, wal);
-
/*
* Wa_1409142259:tgl,dg1,adl-p
* Wa_1409347922:tgl,dg1,adl-p
@@ -732,15 +713,27 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/*
- * Wa_16011163337
+ * Wa_16011163337 - GS_TIMER
+ *
+ * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
+ * need to program it even on those that don't explicitly list that
+ * workaround.
+ *
+ * Note that the programming of GEN12_FF_MODE2 is further modified
+ * according to the FF_MODE2 guidance given by Wa_1608008084.
+ * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
+ * value when read from the CPU.
*
- * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
- * to Wa_1608008084.
+ * The default value for this register is zero for all fields.
+ * So instead of doing a RMW we should just write the desired values
+ * for TDS and GS timers. Note that since the readback can't be trusted,
+ * the clear mask is just set to ~0 to make sure other bits are not
+ * inadvertently set. For the same reason read verification is ignored.
*/
wa_add(wal,
GEN12_FF_MODE2,
- FF_MODE2_GS_TIMER_MASK,
- FF_MODE2_GS_TIMER_224,
+ ~0,
+ FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
0, false);
if (!IS_DG1(i915)) {
@@ -987,6 +980,9 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct intel_uncore *uncore = rq->engine->uncore;
+ enum forcewake_domains fw;
+ unsigned long flags;
struct i915_wa *wa;
unsigned int i;
u32 *cs;
@@ -1003,13 +999,36 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
if (IS_ERR(cs))
return PTR_ERR(cs);
+ fw = wal_get_fw_for_rmw(uncore, wal);
+
+ intel_gt_mcr_lock(wal->gt, &flags);
+ spin_lock(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw);
+
*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val;
+
+ /* Skip reading the register if it's not really needed */
+ if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
+ val = wa->set;
+ } else {
+ val = wa->is_mcr ?
+ intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
+ intel_uncore_read_fw(uncore, wa->reg);
+ val &= ~wa->clr;
+ val |= wa->set;
+ }
+
*cs++ = i915_mmio_reg_offset(wa->reg);
- *cs++ = wa->set;
+ *cs++ = val;
}
*cs++ = MI_NOOP;
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(wal->gt, flags);
+
intel_ring_advance(rq, cs);
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
@@ -1173,7 +1192,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
+ if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
@@ -1185,7 +1204,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
+ if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1441,7 +1460,8 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
- IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
+ ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
+ IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
wa_write_or(wal,
GEN11_SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1485,6 +1505,18 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
+
+ /*
+ * Wa_14015795083
+ *
+ * Firmware on some gen12 platforms locks the MISCCPCTL register,
+ * preventing i915 from modifying it for this workaround. Skip the
+ * readback verification for this workaround on debug builds; if the
+ * workaround doesn't stick due to firmware behavior, it's not an error
+ * that we want CI to flag.
+ */
+ wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
+ 0, 0, false);
}
static void
@@ -1710,7 +1742,6 @@ static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018778641 / Wa_18018781329 */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1743,8 +1774,6 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
* GT, the media GT's versions are regular singleton registers.
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
debug_dump_steering(gt);
}
@@ -1850,22 +1879,6 @@ void intel_gt_init_workarounds(struct intel_gt *gt)
wa_init_finish(wal);
}
-static enum forcewake_domains
-wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
-{
- enum forcewake_domains fw = 0;
- struct i915_wa *wa;
- unsigned int i;
-
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- fw |= intel_uncore_forcewake_for_reg(uncore,
- wa->reg,
- FW_REG_READ |
- FW_REG_WRITE);
-
- return fw;
-}
-
static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
const char *name, const char *from)
@@ -2933,7 +2946,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
@@ -3237,7 +3250,7 @@ wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
- struct drm_i915_private *i915 = rq->engine->i915;
+ struct drm_i915_private *i915 = rq->i915;
unsigned int i, count = 0;
const struct i915_wa *wa;
u32 srm, *cs;
@@ -3336,7 +3349,7 @@ retry:
err = 0;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
+ if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
continue;
if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
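As an aside (not part of the patch): the reworked intel_engine_emit_ctx_wa() above computes the value emitted in the MI_LOAD_REGISTER_IMM from the live register contents instead of writing wa->set directly, skipping the read when the register is masked or when clr | set already covers every bit. A minimal sketch of that merge rule with made-up values (plain C, illustrative names, not i915 code):

#include <stdint.h>
#include <stdio.h>

/* keep the untouched bits, drop the cleared ones, apply the set ones */
static uint32_t wa_merge(uint32_t cur, uint32_t clr, uint32_t set)
{
	return (cur & ~clr) | set;
}

int main(void)
{
	uint32_t cur = 0x0000f0f0;	/* hypothetical live register value */
	uint32_t clr = 0x000000ff;	/* bits the workaround clears */
	uint32_t set = 0x00000003;	/* bits the workaround sets */

	if ((clr | set) == UINT32_MAX)	/* full overwrite: no read needed */
		printf("value = 0x%08x\n", set);
	else
		printf("value = 0x%08x\n", wa_merge(cur, clr, set)); /* 0x0000f003 */

	return 0;
}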
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 76fbae358072..47070cba7eb1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -88,8 +88,9 @@ static int __live_context_size(struct intel_engine_cs *engine)
goto err;
vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915,
- ce->state->obj, false));
+ intel_gt_coherent_map_type(engine->gt,
+ ce->state->obj,
+ false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 542ce6d2de19..86cecf7a1105 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -27,7 +27,7 @@ static void perf_begin(struct intel_gt *gt)
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
- schedule_work(&gt->rps.work);
+ queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
}
@@ -62,7 +62,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 8b0d84f2aad2..0dd4d00ee894 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -73,7 +73,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map_unlocked(h->obj,
- i915_coherent_map_type(gt->i915, h->obj, false));
+ intel_gt_coherent_map_type(gt, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -119,7 +119,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}
- vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index a78a3d2c2e16..5f826b6dcf5d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1292,9 +1292,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
}
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915,
- ce->state->obj,
- false));
+ intel_gt_coherent_map_type(engine->gt,
+ ce->state->obj,
+ false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index a8446ab82501..d73e438fb85f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -137,7 +137,7 @@ static int read_mocs_table(struct i915_request *rq,
if (!table)
return 0;
- if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
addr = global_mocs_offset() + gt->uncore->gsi_offset;
else
addr = mocs_offset(rq->engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 2ceeadecc639..a7189c2d660c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -140,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce)
}
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
- if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
*cs++ = cmd;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 39c3ec12df1a..fa36cf920bde 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -459,12 +459,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (GRAPHICS_VER(rq->engine->i915) >= 8) {
+ if (GRAPHICS_VER(rq->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
- } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(rq->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 3bd6b540257b..7e41f69fc818 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -6,6 +6,7 @@
#include "i915_selftest.h"
#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gen8_engine_cs.h"
@@ -354,7 +355,7 @@ out_a:
static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
{
- intel_gt_invalidate_tlb(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
+ intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
}
static int invalidate_full(void *arg)
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 449c9ed44382..bccc3a1200bc 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -33,7 +33,6 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
enum i915_map_type map_type;
struct file *file;
void *ptr;
@@ -44,7 +43,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- map_type = i915_coherent_map_type(i915, obj, true);
+ map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h
index 714f0c256118..6d009a905269 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_binary_headers.h
@@ -8,6 +8,74 @@
#include <linux/types.h>
+struct intel_gsc_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+} __packed;
+
+struct intel_gsc_partition {
+ u32 offset;
+ u32 size;
+} __packed;
+
+struct intel_gsc_layout_pointers {
+ u8 rom_bypass_vector[16];
+
+ /* size of pointers layout not including ROM bypass vector */
+ u16 size;
+
+ /*
+ * bit0: Backup copy of layout pointers exists
+ * bits1-15: reserved
+ */
+ u8 flags;
+
+ u8 reserved;
+
+ u32 crc32;
+
+ struct intel_gsc_partition datap;
+ struct intel_gsc_partition boot1;
+ struct intel_gsc_partition boot2;
+ struct intel_gsc_partition boot3;
+ struct intel_gsc_partition boot4;
+ struct intel_gsc_partition boot5;
+ struct intel_gsc_partition temp_pages;
+} __packed;
+
+/* Boot partition structures */
+struct intel_gsc_bpdt_header {
+ u32 signature;
+#define INTEL_GSC_BPDT_HEADER_SIGNATURE 0x000055AA
+
+ u16 descriptor_count; /* num of entries after the header */
+
+ u8 version;
+ u8 configuration;
+
+ u32 crc32;
+
+ u32 build_version;
+ struct intel_gsc_version tool_version;
+} __packed;
+
+struct intel_gsc_bpdt_entry {
+ /*
+ * Bits 0-15: BPDT entry type
+ * Bits 16-17: reserved
+ * Bit 18: code sub-partition
+ * Bits 19-31: reserved
+ */
+ u32 type;
+#define INTEL_GSC_BPDT_ENTRY_TYPE_MASK GENMASK(15, 0)
+#define INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE 0x1
+
+ u32 sub_partition_offset; /* from the base of the BPDT header */
+ u32 sub_partition_size;
+} __packed;
+
/* Code partition directory (CPD) structures */
struct intel_gsc_cpd_header_v2 {
u32 header_marker;
@@ -44,13 +112,6 @@ struct intel_gsc_cpd_entry {
u8 reserved[4];
} __packed;
-struct intel_gsc_version {
- u16 major;
- u16 minor;
- u16 hotfix;
- u16 build;
-} __packed;
-
struct intel_gsc_manifest_header {
u32 header_type; /* 0x4 for manifest type */
u32 header_length; /* in dwords */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index f46eb17a7a98..e2e42b3e0d5d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -3,42 +3,216 @@
* Copyright © 2022 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
+#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
-
-#define GSC_FW_STATUS_REG _MMIO(0x116C40)
-#define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0)
-#define GSC_FW_CURRENT_STATE_RESET 0
-#define GSC_FW_PROXY_STATE_NORMAL 5
-#define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9)
+#include "intel_gsc_uc_heci_cmd_submit.h"
+#include "i915_reg.h"
static bool gsc_is_in_reset(struct intel_uncore *uncore)
{
- u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+ u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
+
+ return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
+ HECI1_FWSTS1_CURRENT_STATE_RESET;
+}
+
+static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
+{
+ intel_wakeref_t wakeref;
+ u32 fw_status = 0;
- return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
- GSC_FW_CURRENT_STATE_RESET;
+ if (needs_wakeref)
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
+
+ if (needs_wakeref)
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+ return fw_status;
+}
+
+bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
+{
+ return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
+ gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
+ needs_wakeref)) ==
+ HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
-bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
+int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc)
{
- struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
- u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+ if (!(IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)))
+ return -ENODEV;
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return -ENODEV;
+ if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
+ return -ENOLINK;
+ if (!intel_gsc_uc_fw_proxy_init_done(gsc, true))
+ return -EAGAIN;
- return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
- GSC_FW_PROXY_STATE_NORMAL;
+ return 0;
}
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
{
- struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
- u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+ return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
+ HECI1_FWSTS1_INIT_COMPLETE;
+}
+
+static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
+{
+ return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
+}
+
+int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
+{
+ struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ const struct intel_gsc_layout_pointers *layout = data;
+ const struct intel_gsc_bpdt_header *bpdt_header = NULL;
+ const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
+ const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
+ const struct intel_gsc_cpd_entry *cpd_entry = NULL;
+ const struct intel_gsc_manifest_header *manifest;
+ size_t min_size = sizeof(*layout);
+ int i;
+
+ if (size < min_size) {
+ gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
+ return -ENODATA;
+ }
+
+ /*
+ * The GSC binary starts with the pointer layout, which contains the
+ * locations of the various partitions of the binary. The partition we're
+ * interested in for the version is boot1, where we can
+ * find a BPDT header followed by entries, one of which points to the
+ * RBE sub-section of the partition. From here, we can parse the CPD
+ * header and the following entries to find the manifest location
+ * (entry identified by the "RBEP.man" name), from which we can finally
+ * extract the version.
+ *
+ * --------------------------------------------------
+ * [ intel_gsc_layout_pointers ]
+ * [ ... ]
+ * [ boot1.offset >---------------------------]------o
+ * [ ... ] |
+ * -------------------------------------------------- |
+ * |
+ * -------------------------------------------------- |
+ * [ intel_gsc_bpdt_header ]<-----o
+ * --------------------------------------------------
+ * [ intel_gsc_bpdt_entry[] ]
+ * [ entry1 ]
+ * [ ... ]
+ * [ entryX ]
+ * [ type == GSC_RBE ]
+ * [ offset >-----------------------------]------o
+ * [ ... ] |
+ * -------------------------------------------------- |
+ * |
+ * -------------------------------------------------- |
+ * [ intel_gsc_cpd_header_v2 ]<-----o
+ * --------------------------------------------------
+ * [ intel_gsc_cpd_entry[] ]
+ * [ entry1 ]
+ * [ ... ]
+ * [ entryX ]
+ * [ "RBEP.man" ]
+ * [ ... ]
+ * [ offset >----------------------------]------o
+ * [ ... ] |
+ * -------------------------------------------------- |
+ * |
+ * -------------------------------------------------- |
+ * [ intel_gsc_manifest_header ]<-----o
+ * [ ... ]
+ * [ intel_gsc_version fw_version ]
+ * [ ... ]
+ * --------------------------------------------------
+ */
+
+ min_size = layout->boot1.offset + layout->boot1.size;
+ if (size < min_size) {
+ gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
+ size, min_size);
+ return -ENODATA;
+ }
+
+ min_size = sizeof(*bpdt_header);
+ if (layout->boot1.size < min_size) {
+ gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ bpdt_header = data + layout->boot1.offset;
+ if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) {
+ gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
+ bpdt_header->signature);
+ return -EINVAL;
+ }
+
+ min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
+ if (layout->boot1.size < min_size) {
+ gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
+ for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
+ if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) !=
+ INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE)
+ continue;
+
+ cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset;
+ min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header);
+ break;
+ }
+
+ if (!cpd_header) {
+ gt_err(gt, "couldn't find CPD header in GSC binary!\n");
+ return -ENODATA;
+ }
+
+ if (layout->boot1.size < min_size) {
+ gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
+ gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n",
+ cpd_header->header_marker);
+ return -EINVAL;
+ }
+
+ min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries;
+ if (layout->boot1.size < min_size) {
+ gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ cpd_entry = (void *)cpd_header + cpd_header->header_length;
+ for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) {
+ if (strcmp(cpd_entry->name, "RBEP.man") == 0) {
+ manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry);
+ intel_uc_fw_version_from_gsc_manifest(&gsc->release,
+ manifest);
+ gsc->security_version = manifest->security_version;
+ break;
+ }
+ }
- return fw_status & GSC_FW_INIT_COMPLETE_BIT;
+ return 0;
}
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
@@ -108,48 +282,25 @@ out_rq:
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- struct drm_i915_private *i915 = gt->i915;
- struct drm_i915_gem_object *obj;
- void *src, *dst;
+ void *src;
if (!gsc->local)
return -ENODEV;
- obj = gsc->local->obj;
-
- if (obj->base.size < gsc->fw.size)
+ if (gsc->local->size < gsc->fw.size)
return -ENOSPC;
- /*
- * Wa_22016122933: For MTL the shared memory needs to be mapped
- * as WC on CPU side and UC (PAT index 2) on GPU side
- */
- if (IS_METEORLAKE(i915))
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-
- dst = i915_gem_object_pin_map_unlocked(obj,
- i915_coherent_map_type(i915, obj, true));
- if (IS_ERR(dst))
- return PTR_ERR(dst);
-
src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
- i915_coherent_map_type(i915, gsc->fw.obj, true));
- if (IS_ERR(src)) {
- i915_gem_object_unpin_map(obj);
+ intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
+ if (IS_ERR(src))
return PTR_ERR(src);
- }
- memset(dst, 0, obj->base.size);
- memcpy(dst, src, gsc->fw.size);
+ memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
+ memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- /*
- * Wa_22016122933: Making sure the data in dst is
- * visible to GSC right away
- */
intel_guc_write_barrier(&gt->uc.guc);
i915_gem_object_unpin_map(gsc->fw.obj);
- i915_gem_object_unpin_map(obj);
return 0;
}
@@ -157,12 +308,94 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
static int gsc_fw_wait(struct intel_gt *gt)
{
return intel_wait_for_register(gt->uncore,
- GSC_FW_STATUS_REG,
- GSC_FW_INIT_COMPLETE_BIT,
- GSC_FW_INIT_COMPLETE_BIT,
+ HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
+ HECI1_FWSTS1_INIT_COMPLETE,
+ HECI1_FWSTS1_INIT_COMPLETE,
500);
}
+struct intel_gsc_mkhi_header {
+ u8 group_id;
+#define MKHI_GROUP_ID_GFX_SRV 0x30
+
+ u8 command;
+#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)
+
+ u8 reserved;
+ u8 result;
+} __packed;
+
+struct mtl_gsc_ver_msg_in {
+ struct intel_gsc_mtl_header header;
+ struct intel_gsc_mkhi_header mkhi;
+} __packed;
+
+struct mtl_gsc_ver_msg_out {
+ struct intel_gsc_mtl_header header;
+ struct intel_gsc_mkhi_header mkhi;
+ u16 proj_major;
+ u16 compat_major;
+ u16 compat_minor;
+ u16 reserved[5];
+} __packed;
+
+#define GSC_VER_PKT_SZ SZ_4K
+
+static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct mtl_gsc_ver_msg_in *msg_in;
+ struct mtl_gsc_ver_msg_out *msg_out;
+ struct i915_vma *vma;
+ u64 offset;
+ void *vaddr;
+ int err;
+
+ err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+ &vma, &vaddr);
+ if (err) {
+ gt_err(gt, "failed to allocate vma for GSC version query\n");
+ return err;
+ }
+
+ offset = i915_ggtt_offset(vma);
+ msg_in = vaddr;
+ msg_out = vaddr + GSC_VER_PKT_SZ;
+
+ intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
+ HECI_MEADDRESS_MKHI,
+ sizeof(*msg_in), 0);
+ msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
+ msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;
+
+ err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
+ offset,
+ sizeof(*msg_in),
+ offset + GSC_VER_PKT_SZ,
+ GSC_VER_PKT_SZ);
+ if (err) {
+ gt_err(gt,
+ "failed to submit GSC request for compatibility version: %d\n",
+ err);
+ goto out_vma;
+ }
+
+ if (msg_out->header.message_size != sizeof(*msg_out)) {
+ gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
+ msg_out->header.message_size, sizeof(*msg_out),
+ msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
+ err = -EPROTO;
+ goto out_vma;
+ }
+
+ gsc->fw.file_selected.ver.major = msg_out->compat_major;
+ gsc->fw.file_selected.ver.minor = msg_out->compat_minor;
+
+out_vma:
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
@@ -220,10 +453,24 @@ int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
if (err)
goto fail;
+ err = gsc_fw_query_compatibility_version(gsc);
+ if (err)
+ goto fail;
+
+ /* we only support compatibility version 1.0 at the moment */
+ err = intel_uc_check_file_version(gsc_fw, NULL);
+ if (err)
+ goto fail;
+
/* FW is not fully operational until we enable SW proxy */
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
- gt_info(gt, "Loaded GSC firmware %s\n", gsc_fw->file_selected.path);
+ gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
+ gsc_fw->file_selected.path,
+ gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build,
+ gsc->security_version);
return 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
index fff8928218df..bc9dd0de8aaf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
@@ -9,10 +9,13 @@
#include <linux/types.h>
struct intel_gsc_uc;
+struct intel_uc_fw;
struct intel_uncore;
+int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size);
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc);
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc);
-bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc);
+bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref);
+int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index c659cc01f32f..0d3b22a74365 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -7,10 +7,11 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
-#include "intel_gsc_uc.h"
#include "intel_gsc_fw.h"
-#include "i915_drv.h"
#include "intel_gsc_proxy.h"
+#include "intel_gsc_uc.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
static void gsc_work(struct work_struct *work)
{
@@ -61,8 +62,18 @@ static void gsc_work(struct work_struct *work)
}
ret = intel_gsc_proxy_request_handler(gsc);
- if (ret)
+ if (ret) {
+ if (actions & GSC_ACTION_FW_LOAD) {
+ /*
+ * A proxy failure right after firmware load means the proxy-init
+ * step has failed so mark GSC as not usable after this
+ */
+ drm_err(&gt->i915->drm,
+ "GSC proxy handler failed to init\n");
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+ }
goto out_put;
+ }
/* mark the GSC FW init as done the first time we run this */
if (actions & GSC_ACTION_FW_LOAD) {
@@ -71,12 +82,13 @@ static void gsc_work(struct work_struct *work)
* complete the request handling cleanly, so we need to check the
* status register to check if the proxy init was actually successful
*/
- if (intel_gsc_uc_fw_proxy_init_done(gsc)) {
+ if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
} else {
drm_err(&gt->i915->drm,
"GSC status reports proxy init not complete\n");
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
}
}
@@ -98,7 +110,7 @@ static bool gsc_engine_supported(struct intel_gt *gt)
GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
if (gt_is_root(gt))
- mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+ mask = INTEL_INFO(gt->i915)->platform_engine_mask;
else
mask = gt->info.engine_mask;
@@ -133,26 +145,85 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
}
}
+static int gsc_allocate_and_map_vma(struct intel_gsc_uc *gsc, u32 size)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *vaddr;
+ int ret = 0;
+
+ /*
+ * The GSC FW doesn't immediately suspend after becoming idle, so there
+ * is a chance that it could still be awake after we successfully
+ * return from the pci suspend function, even if there are no pending
+ * operations.
+ * The FW might therefore try to access memory for its suspend operation
+ * after the kernel has completed the HW suspend flow; this can cause
+ * issues if the FW is mapped in normal RAM memory, as some of the
+ * involved HW units might've already lost power.
+ * The driver must therefore avoid this situation and the recommended
+ * way to do so is to use stolen memory for the GSC memory allocation,
+ * because stolen memory takes a different path in HW and it is
+ * guaranteed to always work as long as the GPU itself is awake (which
+ * it must be if the GSC is awake).
+ */
+ obj = i915_gem_object_create_stolen(gt->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ vaddr = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err;
+ }
+
+ i915_vma_make_unshrinkable(vma);
+
+ gsc->local = vma;
+ gsc->local_vaddr = vaddr;
+
+ return 0;
+
+err:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void gsc_unmap_and_free_vma(struct intel_gsc_uc *gsc)
+{
+ struct i915_vma *vma = fetch_and_zero(&gsc->local);
+
+ if (!vma)
+ return;
+
+ gsc->local_vaddr = NULL;
+ i915_vma_unpin_iomap(vma);
+ i915_gem_object_put(vma->obj);
+}
+
int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
static struct lock_class_key gsc_lock;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct intel_engine_cs *engine = gt->engine[GSC0];
struct intel_context *ce;
- struct i915_vma *vma;
int err;
err = intel_uc_fw_init(&gsc->fw);
if (err)
goto out;
- vma = intel_guc_allocate_vma(&gt->uc.guc, SZ_8M);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
+ err = gsc_allocate_and_map_vma(gsc, SZ_4M);
+ if (err)
goto out_fw;
- }
-
- gsc->local = vma;
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
I915_GEM_HWS_GSC_ADDR,
@@ -173,7 +244,7 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
return 0;
out_vma:
- i915_vma_unpin_and_release(&gsc->local, 0);
+ gsc_unmap_and_free_vma(gsc);
out_fw:
intel_uc_fw_fini(&gsc->fw);
out:
@@ -197,7 +268,7 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
if (gsc->ce)
intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
- i915_vma_unpin_and_release(&gsc->local, 0);
+ gsc_unmap_and_free_vma(gsc);
intel_uc_fw_fini(&gsc->fw);
}
@@ -245,3 +316,45 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
queue_work(gsc->wq, &gsc->work);
}
+
+void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct intel_uncore *uncore = gt->uncore;
+ intel_wakeref_t wakeref;
+
+ if (!intel_gsc_uc_is_supported(gsc)) {
+ drm_printf(p, "GSC not supported\n");
+ return;
+ }
+
+ if (!intel_gsc_uc_is_wanted(gsc)) {
+ drm_printf(p, "GSC disabled\n");
+ return;
+ }
+
+ drm_printf(p, "GSC firmware: %s\n", gsc->fw.file_selected.path);
+ if (gsc->fw.file_selected.path != gsc->fw.file_wanted.path)
+ drm_printf(p, "GSC firmware wanted: %s\n", gsc->fw.file_wanted.path);
+ drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(gsc->fw.status));
+
+ drm_printf(p, "Release: %u.%u.%u.%u\n",
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build);
+
+ drm_printf(p, "Compatibility Version: %u.%u [min expected %u.%u]\n",
+ gsc->fw.file_selected.ver.major, gsc->fw.file_selected.ver.minor,
+ gsc->fw.file_wanted.ver.major, gsc->fw.file_wanted.ver.minor);
+
+ drm_printf(p, "SVN: %u\n", gsc->security_version);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ u32 i;
+
+ for (i = 1; i <= 6; i++) {
+ u32 status = intel_uncore_read(uncore,
+ HECI_FWSTS(MTL_GSC_HECI1_BASE, i));
+ drm_printf(p, "HECI1 FWSTST%u = 0x%08x\n", i, status);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
index a2a0813b8a76..c8082cf200fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -8,6 +8,7 @@
#include "intel_uc_fw.h"
+struct drm_printer;
struct i915_vma;
struct intel_context;
struct i915_gsc_proxy_component;
@@ -17,7 +18,26 @@ struct intel_gsc_uc {
struct intel_uc_fw fw;
/* GSC-specific additions */
+
+ /*
+ * The GSC has 3 version numbers:
+ * - Release version (incremented with each build)
+ * - Security version (incremented on security fix)
+ * - Compatibility version (incremented on interface change)
+ *
+ * The one that matters for using the binary is the last one, so that's
+ * the one we save inside the intel_uc_fw structure. The other two
+ * versions are only used for debug/info purposes, so we save them here.
+ *
+ * Note that the release and security versions are available in the
+ * binary header, while the compatibility version must be queried after
+ * loading the binary.
+ */
+ struct intel_uc_fw_ver release;
+ u32 security_version;
+
struct i915_vma *local; /* private memory for GSC usage */
+ void __iomem *local_vaddr; /* pointer to access the private memory */
struct intel_context *ce; /* for submission to GSC FW via GSC engine */
/* for delayed load and proxy handling */
@@ -44,6 +64,7 @@ void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
void intel_gsc_uc_resume(struct intel_gsc_uc *gsc);
void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc);
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p);
static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c
new file mode 100644
index 000000000000..5baacd822a1c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_debugfs.h"
+#include "gt/intel_gt_print.h"
+#include "intel_gsc_uc.h"
+#include "intel_gsc_uc_debugfs.h"
+#include "i915_drv.h"
+
+static int gsc_info_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_gsc_uc *gsc = m->private;
+
+ if (!intel_gsc_uc_is_supported(gsc))
+ return -ENODEV;
+
+ intel_gsc_uc_load_status(gsc, &p);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(gsc_info);
+
+void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc_uc, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "gsc_info", &gsc_info_fops, NULL },
+ };
+
+ if (!intel_gsc_uc_is_supported(gsc_uc))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gsc_uc);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h
new file mode 100644
index 000000000000..3415ad39aabb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GSC_UC_H
+#define DEBUGFS_GSC_UC_H
+
+struct intel_gsc_uc;
+struct dentry;
+
+void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc, struct dentry *root);
+
+#endif /* DEBUGFS_GSC_UC_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
index ef70e304904a..09d3fbdad05a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
@@ -17,6 +17,7 @@ struct intel_gsc_mtl_header {
#define GSC_HECI_VALIDITY_MARKER 0xA578875A
u8 heci_client_id;
+#define HECI_MEADDRESS_MKHI 7
#define HECI_MEADDRESS_PROXY 10
#define HECI_MEADDRESS_PXP 17
#define HECI_MEADDRESS_HDCP 18
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2eb891b270ae..569b5fe94c41 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -745,10 +745,11 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
return ERR_CAST(obj);
/*
- * Wa_22016122933: For MTL the shared memory needs to be mapped
- * as WC on CPU side and UC (PAT index 2) on GPU side
+ * Wa_22016122933: For Media version 13.0, all Media GT shared
+ * memory needs to be mapped as WC on CPU side and UC (PAT
+ * index 2) on GPU side.
*/
- if (IS_METEORLAKE(gt->i915))
+ if (intel_gt_needs_wa_22016122933(gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
@@ -792,8 +793,8 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
- i915_coherent_map_type(guc_to_gt(guc)->i915,
- vma->obj, true));
+ intel_gt_coherent_map_type(guc_to_gt(guc),
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 0ff864da92df..331cec07c125 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -301,7 +301,6 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
struct __guc_mmio_reg_descr *extarray;
- struct sseu_dev_info *sseu;
bool has_xehpg_extregs;
/* steered registers currently only exist for the render-class */
@@ -318,7 +317,6 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
if (has_xehpg_extregs)
num_steer_regs += ARRAY_SIZE(xehpg_extregs);
- sseu = &gt->info.sseu;
for_each_ss_steering(iter, gt, slice, subslice)
num_tot_regs += num_steer_regs;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index f28a3a83742d..97eadd08181d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -960,10 +960,6 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
/* now update descriptor */
WRITE_ONCE(desc->head, head);
- /*
- * Wa_22016122933: Making sure the head update is
- * visible to GuC right away
- */
intel_guc_write_barrier(ct_to_guc(ct));
return available - len;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 364d0d546ec8..0f79cb658518 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -251,9 +251,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
if (ret == 0)
ret = -ENXIO;
} else if (delta_ms > 200) {
- guc_warn(guc, "excessive init time: %lldms! [freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d]\n",
- delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
- before_freq, status, count, ret);
+ guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
+ delta_ms, status, count, ret);
+ guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
+ intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
+ intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index 852bea0208ce..cc9569af7f0c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -94,7 +94,7 @@ static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig
static bool has_table(struct drm_i915_private *i915)
{
- if (IS_ALDERLAKE_P(i915) && !IS_ADLP_N(i915))
+ if (IS_ALDERLAKE_P(i915) && !IS_ALDERLAKE_P_N(i915))
return true;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return true;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 01b75529311c..477df260ae3a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
ret = slpc_set_param(slpc,
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val);
- if (ret)
+ if (ret) {
guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
val, ERR_PTR(ret));
- else
+ } else {
slpc->ignore_eff_freq = val;
+ /* Set min to RPn when we disable efficient freq */
+ if (val)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ slpc->min_freq);
+ }
+
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
return ret;
@@ -602,11 +609,10 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
return ret;
if (!slpc->min_freq_softlimit) {
- ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
- if (unlikely(ret))
- return ret;
+ /* Min softlimit is initialized to RPn */
+ slpc->min_freq_softlimit = slpc->min_freq;
slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
- } else if (slpc->min_freq_softlimit != slpc->min_freq) {
+ } else {
return intel_guc_slpc_set_min_freq(slpc,
slpc->min_freq_softlimit);
}
@@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
return ret;
}
+ /* Set cached value of ignore efficient freq */
+ intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
@@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
- /* Set cached value of ignore efficient freq */
- intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index e0afd8f89502..ba9e07fc2b57 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -26,6 +26,7 @@
* The kernel driver is only responsible for loading the HuC firmware and
* triggering its security authentication. This is done differently depending
* on the platform:
+ *
* - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
* and the authentication via GuC
* - DG2: load and authentication are both performed via GSC.
@@ -33,6 +34,7 @@
* not-DG2 older platforms), while the authentication is done in 2-steps,
* a first auth for clear-media workloads via GuC and a second one for all
* workloads via GSC.
+ *
* On platforms where the GuC does the authentication, to correctly do so the
* HuC binary must be loaded before the GuC one.
* Loading the HuC is optional; however, not using the HuC might negatively
@@ -265,7 +267,7 @@ static bool vcs_supported(struct intel_gt *gt)
GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
if (gt_is_root(gt))
- mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+ mask = INTEL_INFO(gt->i915)->platform_engine_mask;
else
mask = gt->info.engine_mask;
@@ -308,9 +310,9 @@ void intel_huc_init_early(struct intel_huc *huc)
huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
} else {
- huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS5(MTL_GSC_HECI1_BASE);
- huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI_FWSTS5_HUC_AUTH_DONE;
- huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI_FWSTS5_HUC_AUTH_DONE;
+ huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
+ huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
+ huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
}
}
@@ -384,6 +386,7 @@ int intel_huc_init(struct intel_huc *huc)
vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
goto out;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index e608152fecfc..b648238cc675 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -27,7 +27,6 @@ struct mtl_huc_auth_msg_out {
int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct mtl_huc_auth_msg_in *msg_in;
struct mtl_huc_auth_msg_out *msg_out;
@@ -43,7 +42,7 @@ int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
pkt_offset = i915_ggtt_offset(huc->heci_pkt);
pkt_vaddr = i915_gem_object_pin_map_unlocked(obj,
- i915_coherent_map_type(i915, obj, true));
+ intel_gt_coherent_map_type(gt, obj, true));
if (IS_ERR(pkt_vaddr))
return PTR_ERR(pkt_vaddr);
@@ -107,15 +106,6 @@ out_unpin:
return err;
}
-static void get_version_from_gsc_manifest(struct intel_uc_fw_ver *ver, const void *data)
-{
- const struct intel_gsc_manifest_header *manifest = data;
-
- ver->major = manifest->fw_version.major;
- ver->minor = manifest->fw_version.minor;
- ver->patch = manifest->fw_version.hotfix;
-}
-
static bool css_valid(const void *data, size_t size)
{
const struct uc_css_header *css = data;
@@ -227,8 +217,8 @@ int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, s
for (i = 0; i < header->num_of_entries; i++, entry++) {
if (strcmp(entry->name, "HUCP.man") == 0)
- get_version_from_gsc_manifest(&huc_fw->file_selected.ver,
- data + entry_offset(entry));
+ intel_uc_fw_version_from_gsc_manifest(&huc_fw->file_selected.ver,
+ data + entry_offset(entry));
if (strcmp(entry->name, "huc_fw") == 0) {
u32 offset = entry_offset(entry);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 18250fb64bd8..98b103375b7a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -43,7 +43,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
}
/* Intermediate platforms are HuC authentication only */
- if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
+ if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index 2f93cc4e408a..6d541c866edb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -10,6 +10,7 @@
#include "gt/intel_gt_debugfs.h"
#include "intel_guc_debugfs.h"
+#include "intel_gsc_uc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"
@@ -58,6 +59,7 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
+ intel_gsc_uc_debugfs_register(&uc->gsc, root);
intel_guc_debugfs_register(&uc->guc, root);
intel_huc_debugfs_register(&uc->huc, root);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 944725e62414..8be005de1d28 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,7 +11,10 @@
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
+#include "intel_gsc_binary_headers.h"
+#include "intel_gsc_fw.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -277,7 +280,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
* ADL-S, otherwise the GuC might attempt to fetch a config table that
* does not exist.
*/
- if (IS_ADLP_N(i915))
+ if (IS_ALDERLAKE_P_N(i915))
p = INTEL_ALDERLAKE_S;
GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
@@ -468,6 +471,17 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
}
}
+void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
+ const void *data)
+{
+ const struct intel_gsc_manifest_header *manifest = data;
+
+ ver->major = manifest->fw_version.major;
+ ver->minor = manifest->fw_version.minor;
+ ver->patch = manifest->fw_version.hotfix;
+ ver->build = manifest->fw_version.build;
+}
+
/**
* intel_uc_fw_init_early - initialize the uC object and select the firmware
* @uc_fw: uC firmware
@@ -668,13 +682,18 @@ static int check_gsc_manifest(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
- if (uc_fw->type != INTEL_UC_FW_TYPE_HUC) {
- gt_err(gt, "trying to GSC-parse a non-HuC binary");
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_HUC:
+ intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ break;
+ case INTEL_UC_FW_TYPE_GSC:
+ intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ break;
+ default:
+ MISSING_CASE(uc_fw->type);
return -EINVAL;
}
- intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
-
if (uc_fw->dma_start_offset) {
u32 delta = uc_fw->dma_start_offset;
@@ -734,10 +753,6 @@ static int check_fw_header(struct intel_gt *gt,
{
int err = 0;
- /* GSC FW version is queried after the FW is loaded */
- if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
- return 0;
-
if (uc_fw->has_gsc_headers)
err = check_gsc_manifest(gt, fw, uc_fw);
else
@@ -773,6 +788,80 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
return 0;
}
+static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
+ struct intel_uc_fw_file *huc_selected)
+{
+ struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+ struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
+ struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
+ bool new_huc, new_guc;
+
+ /* we can only do this check after having fetched both GuC and HuC */
+ GEM_BUG_ON(!huc_selected->path || !guc_selected->path);
+
+ /*
+ * Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer
+ * requires GuC 70.7.0 or newer. Older HuC binaries will instead require
+ * GuC < 70.7.0.
+ */
+ new_huc = huc_ver->major > 8 ||
+ (huc_ver->major == 8 && huc_ver->minor > 5) ||
+ (huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1);
+
+ new_guc = guc_ver->major > 70 ||
+ (guc_ver->major == 70 && guc_ver->minor >= 7);
+
+ if (new_huc != new_guc) {
+ UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n",
+ huc_ver->major, huc_ver->minor, huc_ver->patch,
+ guc_ver->major, guc_ver->minor, guc_ver->patch);
+ gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n");
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
+
+int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct intel_uc_fw_file *wanted = &uc_fw->file_wanted;
+ struct intel_uc_fw_file *selected = &uc_fw->file_selected;
+ int ret;
+
+ /*
+ * MTL has some compatibility issues with early GuC/HuC binaries
+ * not working with newer ones. This is specific to MTL and we
+ * don't expect it to extend to other platforms.
+ */
+ if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) {
+ ret = check_mtl_huc_guc_compatibility(gt, selected);
+ if (ret)
+ return ret;
+ }
+
+ if (!wanted->ver.major || !selected->ver.major)
+ return 0;
+
+ /* Check the file's major version was as it claimed */
+ if (selected->ver.major != wanted->ver.major) {
+ UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), selected->path,
+ selected->ver.major, selected->ver.minor,
+ wanted->ver.major, wanted->ver.minor);
+ if (!intel_uc_fw_is_overridden(uc_fw))
+ return -ENOEXEC;
+ } else if (old_ver) {
+ if (selected->ver.minor < wanted->ver.minor)
+ *old_ver = true;
+ else if ((selected->ver.minor == wanted->ver.minor) &&
+ (selected->ver.patch < wanted->ver.patch))
+ *old_ver = true;
+ }
+
+ return 0;
+}
+
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
@@ -840,25 +929,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
goto fail;
}
- if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
- /* Check the file's major version was as it claimed */
- if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
- UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
- if (!intel_uc_fw_is_overridden(uc_fw)) {
- err = -ENOEXEC;
- goto fail;
- }
- } else {
- if (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor)
- old_ver = true;
- else if ((uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
- (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
- old_ver = true;
- }
- }
+ err = intel_uc_check_file_version(uc_fw, &old_ver);
+ if (err)
+ goto fail;
if (old_ver && uc_fw->file_selected.ver.major) {
/* Preserve the version that was really wanted */
@@ -1125,7 +1198,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
- i915_coherent_map_type(gt->i915, vma->obj, true));
+ intel_gt_coherent_map_type(gt, vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
err = PTR_ERR(vaddr);
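
The version gate added in check_mtl_huc_guc_compatibility() above reduces to two independent threshold tests that must agree. A minimal standalone sketch of the same comparison, assuming an illustrative struct fw_ver in place of intel_uc_fw_ver and a main() harness that is not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct intel_uc_fw_ver. */
struct fw_ver { unsigned int major, minor, patch; };

/* HuC 8.5.1 or newer belongs to the new authentication flow. */
static bool huc_is_new(const struct fw_ver *v)
{
	return v->major > 8 ||
	       (v->major == 8 && v->minor > 5) ||
	       (v->major == 8 && v->minor == 5 && v->patch >= 1);
}

/* GuC 70.7.0 or newer pairs with the new HuC flow. */
static bool guc_is_new(const struct fw_ver *v)
{
	return v->major > 70 || (v->major == 70 && v->minor >= 7);
}

int main(void)
{
	struct fw_ver huc = { 8, 5, 1 }, guc = { 70, 6, 0 };

	/* Mixed generations are rejected, mirroring the -ENOEXEC path above. */
	printf("compatible: %s\n",
	       huc_is_new(&huc) == guc_is_new(&guc) ? "yes" : "no");
	return 0;
}
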
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 054f02811971..9a431726c8d5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -70,6 +70,7 @@ struct intel_uc_fw_ver {
u32 major;
u32 minor;
u32 patch;
+ u32 build;
};
/*
@@ -289,6 +290,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_get_upload_size(uc_fw);
}
+void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
+ const void *data);
+int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver);
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_type type,
bool needs_ggtt_mapping);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 1fd760539f77..bfb72143566f 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -204,9 +204,9 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
- if (ret != -EAGAIN) {
- guc_err(guc, "Failed to create request %d: %pe\n",
- context_index, ERR_PTR(ret));
+ if ((ret != -EAGAIN) || !last) {
+ guc_err(guc, "Failed to create %srequest %d: %pe\n",
+ last ? "" : "first ", context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 3c4ae1da0d41..05f9348b7a9d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2833,7 +2833,7 @@ static int command_scan(struct parser_exec_state *s,
static int scan_workload(struct intel_vgpu_workload *workload)
{
- unsigned long gma_head, gma_tail, gma_bottom;
+ unsigned long gma_head, gma_tail;
struct parser_exec_state s;
int ret = 0;
@@ -2843,7 +2843,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head = workload->rb_start + workload->rb_head;
gma_tail = workload->rb_start + workload->rb_tail;
- gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
@@ -2874,7 +2873,7 @@ out:
static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
+ unsigned long gma_head, gma_tail, ring_size, ring_tail;
struct parser_exec_state s;
int ret = 0;
struct intel_vgpu_workload *workload = container_of(wa_ctx,
@@ -2891,7 +2890,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
PAGE_SIZE);
gma_head = wa_ctx->indirect_ctx.guest_gma;
gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
- gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 2a0438f12a14..af9afdb53c7f 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -491,7 +491,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
return;
}
- msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg);
+ msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value);
// check the msg in DATA register.
msg = vgpu_vreg(vgpu, offset + 4);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f4055804aad1..a5c8005ec484 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -974,7 +974,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
context_base = (void *) ctx->lrc_reg_state -
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 8ef93889061a..5ec293011d99 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
}
} while (unlikely(is_barrier(active)));
- if (!__i915_active_fence_set(active, fence))
+ fence = __i915_active_fence_set(active, fence);
+ if (!fence)
__i915_active_acquire(ref);
+ else
+ dma_fence_put(fence);
out:
i915_active_release(ref);
@@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
return NULL;
}
- rcu_read_lock();
prev = __i915_active_fence_set(active, fence);
- if (prev)
- prev = dma_fence_get_rcu(prev);
- else
+ if (!prev)
__i915_active_acquire(ref);
- rcu_read_unlock();
return prev;
}
@@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
*
* Records the new @fence as the last active fence along its timeline in
* this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
*/
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *prev;
unsigned long flags;
- if (fence == rcu_access_pointer(active->fence))
+ /*
+	 * In case of fences embedded in i915_requests, their memory is
+	 * SLAB_TYPESAFE_BY_RCU, so it can be reused right after release
+	 * by new requests. There is then a risk of passing back a pointer
+ * to a new, completely unrelated fence that reuses the same memory
+ * while tracked under a different active tracker. Combined with i915
+ * perf open/close operations that build await dependencies between
+ * engine kernel context requests and user requests from different
+ * timelines, this can lead to dependency loops and infinite waits.
+ *
+ * As a countermeasure, we try to get a reference to the active->fence
+ * first, so if we succeed and pass it back to our user then it is not
+ * released and potentially reused by an unrelated request before the
+ * user has a chance to set up an await dependency on it.
+ */
+ prev = i915_active_fence_get(active);
+ if (fence == prev)
return fence;
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
* Consider that we have two threads arriving (A and B), with
* C already resident as the active->fence.
*
- * A does the xchg first, and so it sees C or NULL depending
- * on the timing of the interrupt handler. If it is NULL, the
- * previous fence must have been signaled and we know that
- * we are first on the timeline. If it is still present,
- * we acquire the lock on that fence and serialise with the interrupt
- * handler, in the process removing it from any future interrupt
- * callback. A will then wait on C before executing (if present).
- *
- * As B is second, it sees A as the previous fence and so waits for
- * it to complete its transition and takes over the occupancy for
- * itself -- remembering that it needs to wait on A before executing.
+ * Both A and B have got a reference to C or NULL, depending on the
+ * timing of the interrupt handler. Let's assume that if A has got C
+ * then it has locked C first (before B).
*
* Note the strong ordering of the timeline also provides consistent
* nesting rules for the fence->lock; the inner lock is always the
* older lock.
*/
spin_lock_irqsave(fence->lock, flags);
- prev = xchg(__active_fence_slot(active), fence);
- if (prev) {
- GEM_BUG_ON(prev == fence);
+ if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+ /*
+ * A does the cmpxchg first, and so it sees C or NULL, as before, or
+ * something else, depending on the timing of other threads and/or
+ * interrupt handler. If not the same as before then A unlocks C if
+ * applicable and retries, starting from an attempt to get a new
+ * active->fence. Meanwhile, B follows the same path as A.
+	 * Once A succeeds with cmpxchg, B fails again, retries, gets A from
+ * active->fence, locks it as soon as A completes, and possibly
+ * succeeds with cmpxchg.
+ */
+ while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+ if (prev) {
+ spin_unlock(prev->lock);
+ dma_fence_put(prev);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ prev = i915_active_fence_get(active);
+ GEM_BUG_ON(prev == fence);
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (prev)
+ spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ }
+
+ /*
+ * If prev is NULL then the previous fence must have been signaled
+ * and we know that we are first on the timeline. If it is still
+ * present then, having the lock on that fence already acquired, we
+ * serialise with the interrupt handler, in the process of removing it
+ * from any future interrupt callback. A will then wait on C before
+ * executing (if present).
+ *
+ * As B is second, it sees A as the previous fence and so waits for
+ * it to complete its transition and takes over the occupancy for
+ * itself -- remembering that it needs to wait on A before executing.
+ */
+ if (prev) {
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
@@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
int err = 0;
/* Must maintain timeline ordering wrt previous active requests */
- rcu_read_lock();
fence = __i915_active_fence_set(active, &rq->fence);
- if (fence) /* but the previous fence may not belong to that timeline! */
- fence = dma_fence_get_rcu(fence);
- rcu_read_unlock();
if (fence) {
err = i915_request_await_dma_fence(rq, fence);
dma_fence_put(fence);
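
The reworked __i915_active_fence_set() above pairs taking a reference before the exchange with a cmpxchg retry loop, so the previous occupant cannot be freed and recycled while the caller still needs it. A much-simplified sketch of that pattern using C11 atomics; the per-fence spinlock ordering and the RCU-safe acquisition done by i915_active_fence_get() are omitted, and obj/slot_set are illustrative names rather than driver API:

#include <stdatomic.h>

/* Illustrative refcounted object standing in for a dma_fence. */
struct obj { atomic_int refcount; };

static struct obj *obj_get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refcount, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	if (o)
		atomic_fetch_sub(&o->refcount, 1);
}

/*
 * Install @fence in *slot and return the previous occupant with a
 * reference held for the caller. The reference is taken before the
 * exchange; if the slot changed underneath us, the stale reference is
 * dropped and the whole sequence retried.
 */
static struct obj *slot_set(_Atomic(struct obj *) *slot, struct obj *fence)
{
	for (;;) {
		struct obj *prev = obj_get(atomic_load(slot));
		struct obj *expected = prev;

		if (prev == fence)
			return fence;	/* already current; ref goes back to caller */
		if (atomic_compare_exchange_strong(slot, &expected, fence))
			return prev;	/* caller must put this reference */
		obj_put(prev);		/* lost the race, try again */
	}
}
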
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 76ccd4e03e31..4de44cf1026d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -67,6 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
+ intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 97244541ec28..b870c0df081a 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -132,8 +132,20 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->display.hotplug.dp_wq == NULL)
goto out_free_wq;
+ /*
+	 * The unordered i915 workqueue should be used for all work
+	 * that does not require running in order, which used to be
+	 * scheduled on the system_wq before moving to a driver instance
+	 * due to the deprecation of flush_scheduled_work().
+ */
+ dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
+ if (dev_priv->unordered_wq == NULL)
+ goto out_free_dp_wq;
+
return 0;
+out_free_dp_wq:
+ destroy_workqueue(dev_priv->display.hotplug.dp_wq);
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
@@ -144,6 +156,7 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
+ destroy_workqueue(dev_priv->unordered_wq);
destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -162,7 +175,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
bool pre = false;
- pre |= IS_HSW_EARLY_SDV(dev_priv);
+ pre |= IS_HASWELL_EARLY_SDV(dev_priv);
pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
@@ -698,6 +711,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
intel_device_info_print(INTEL_INFO(dev_priv),
RUNTIME_INFO(dev_priv), &p);
+ intel_display_device_info_print(DISPLAY_INFO(dev_priv),
+ DISPLAY_RUNTIME_INFO(dev_priv), &p);
i915_print_iommu_status(dev_priv, &p);
for_each_gt(gt, dev_priv, i)
intel_gt_info_print(&gt->info, &p);
@@ -759,8 +774,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i915 = i915_driver_create(pdev, ent);
if (IS_ERR(i915)) {
- ret = PTR_ERR(i915);
- goto out_pci_disable;
+ pci_disable_device(pdev);
+ return PTR_ERR(i915);
}
ret = i915_driver_early_probe(i915);
@@ -1803,10 +1818,8 @@ static const struct drm_driver i915_drm_driver = {
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
- .show_fdinfo = i915_drm_client_fdinfo,
+ .show_fdinfo = PTR_IF(IS_ENABLED(CONFIG_PROC_FS), i915_drm_client_fdinfo),
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 4c18b99e10a4..67816c912bca 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -47,8 +47,6 @@ static inline void i915_drm_client_put(struct i915_drm_client *client)
struct i915_drm_client *i915_drm_client_alloc(void);
-#ifdef CONFIG_PROC_FS
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
-#endif
#endif /* !__I915_DRM_CLIENT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b457a37e67c4..7a8ce7239bc9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -203,9 +203,8 @@ struct drm_i915_private {
/* i915 device parameters */
struct i915_params params;
- const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+ const struct intel_device_info *__info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
- struct intel_display_runtime_info __display_runtime; /* Access with DISPLAY_RUNTIME_INFO() */
struct intel_driver_caps caps;
struct i915_dsm dsm;
@@ -260,6 +259,16 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /**
+ * unordered_wq - internal workqueue for unordered work
+ *
+ * This workqueue should be used for all unordered work
+ * scheduling within i915, which used to be scheduled on the
+	 * system_wq before moving to a driver instance due to the
+	 * deprecation of flush_scheduled_work().
+ */
+ struct workqueue_struct *unordered_wq;
+
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
@@ -314,7 +323,6 @@ struct drm_i915_private {
/*
* i915->gt[0] == &i915->gt0
*/
-#define I915_MAX_GT 2
struct intel_gt *gt[I915_MAX_GT];
struct kobject *sysfs_gt;
@@ -406,10 +414,10 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(engine__) && (engine__)->uabi_class == (class__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define INTEL_INFO(i915) (&(i915)->__info)
-#define DISPLAY_INFO(i915) (INTEL_INFO(i915)->display)
+#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
-#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->__display_runtime)
+#define DISPLAY_INFO(i915) ((i915)->display.info.__device_info)
+#define DISPLAY_RUNTIME_INFO(i915) (&(i915)->display.info.__runtime_info)
#define DRIVER_CAPS(i915) (&(i915)->caps)
#define INTEL_DEVID(i915) (RUNTIME_INFO(i915)->device_id)
@@ -553,8 +561,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_COFFEELAKE(i915) IS_PLATFORM(i915, INTEL_COFFEELAKE)
#define IS_COMETLAKE(i915) IS_PLATFORM(i915, INTEL_COMETLAKE)
#define IS_ICELAKE(i915) IS_PLATFORM(i915, INTEL_ICELAKE)
-#define IS_JSL_EHL(i915) (IS_PLATFORM(i915, INTEL_JASPERLAKE) || \
- IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+#define IS_JASPERLAKE(i915) IS_PLATFORM(i915, INTEL_JASPERLAKE)
+#define IS_ELKHARTLAKE(i915) IS_PLATFORM(i915, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(i915) IS_PLATFORM(i915, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(i915) IS_PLATFORM(i915, INTEL_ROCKETLAKE)
#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
@@ -575,105 +583,77 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
-#define IS_ADLS_RPLS(i915) \
+#define IS_RAPTORLAKE_S(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_N(i915) \
+#define IS_ALDERLAKE_P_N(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
-#define IS_ADLP_RPLP(i915) \
+#define IS_RAPTORLAKE_P(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_RPLU(i915) \
+#define IS_RAPTORLAKE_U(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
-#define IS_HSW_EARLY_SDV(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_EARLY_SDV(i915) (IS_HASWELL(i915) && \
(INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(i915) \
+#define IS_BROADWELL_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_BDW_ULX(i915) \
+#define IS_BROADWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_BDW_GT3(i915) (IS_BROADWELL(i915) && \
+#define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_HSW_ULT(i915) \
+#define IS_HASWELL_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_HSW_GT3(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_GT3(i915) (IS_HASWELL(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_HSW_GT1(i915) (IS_HASWELL(i915) && \
+#define IS_HASWELL_GT1(i915) (IS_HASWELL(i915) && \
INTEL_INFO(i915)->gt == 1)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(i915) \
+#define IS_HASWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_ULT(i915) \
+#define IS_SKYLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_SKL_ULX(i915) \
+#define IS_SKYLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_KBL_ULT(i915) \
+#define IS_KABYLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_KBL_ULX(i915) \
+#define IS_KABYLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT2(i915) (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT2(i915) (IS_SKYLAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
-#define IS_SKL_GT3(i915) (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT3(i915) (IS_SKYLAKE(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_SKL_GT4(i915) (IS_SKYLAKE(i915) && \
+#define IS_SKYLAKE_GT4(i915) (IS_SKYLAKE(i915) && \
INTEL_INFO(i915)->gt == 4)
-#define IS_KBL_GT2(i915) (IS_KABYLAKE(i915) && \
+#define IS_KABYLAKE_GT2(i915) (IS_KABYLAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
-#define IS_KBL_GT3(i915) (IS_KABYLAKE(i915) && \
+#define IS_KABYLAKE_GT3(i915) (IS_KABYLAKE(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_CFL_ULT(i915) \
+#define IS_COFFEELAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CFL_ULX(i915) \
+#define IS_COFFEELAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CFL_GT2(i915) (IS_COFFEELAKE(i915) && \
+#define IS_COFFEELAKE_GT2(i915) (IS_COFFEELAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
-#define IS_CFL_GT3(i915) (IS_COFFEELAKE(i915) && \
+#define IS_COFFEELAKE_GT3(i915) (IS_COFFEELAKE(i915) && \
INTEL_INFO(i915)->gt == 3)
-#define IS_CML_ULT(i915) \
+#define IS_COMETLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CML_ULX(i915) \
+#define IS_COMETLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CML_GT2(i915) (IS_COMETLAKE(i915) && \
+#define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \
INTEL_INFO(i915)->gt == 2)
#define IS_ICL_WITH_PORT_F(i915) \
IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
-#define IS_TGL_UY(i915) \
+#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
-#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_KBL_GRAPHICS_STEP(i915, since, until) \
- (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, since, until))
-#define IS_KBL_DISPLAY_STEP(i915, since, until) \
- (IS_KABYLAKE(i915) && IS_DISPLAY_STEP(i915, since, until))
-#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
- (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
- (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
- (IS_TIGERLAKE(__i915) && \
- IS_DISPLAY_STEP(__i915, since, until))
-#define IS_RKL_DISPLAY_STEP(p, since, until) \
- (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
-#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
- (IS_ALDERLAKE_S(__i915) && \
- IS_DISPLAY_STEP(__i915, since, until))
-#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
- (IS_ALDERLAKE_S(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
-
-#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
- (IS_ALDERLAKE_P(__i915) && \
- IS_DISPLAY_STEP(__i915, since, until))
-
-#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
- (IS_ALDERLAKE_P(__i915) && \
- IS_GRAPHICS_STEP(__i915, since, until))
#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
@@ -791,7 +771,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
- (IS_SKL_GT3(i915) || IS_SKL_GT4(i915))
+ (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -829,7 +809,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
*/
#define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages)
-#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
#define HAS_EXTRA_GT_LIST(i915) (INTEL_INFO(i915)->extra_gt_list)
@@ -852,7 +832,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
-#define NUM_L3_SLICES(i915) (IS_HSW_GT3(i915) ? \
+#define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \
2 : HAS_L3_DPF(i915))
/* Only valid when HAS_DISPLAY() is true */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index ec368e700235..4008bb09fdb5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -187,64 +187,64 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
}
/* single threaded page allocator with a reserved stash for emergencies */
-static void pool_fini(struct pagevec *pv)
+static void pool_fini(struct folio_batch *fbatch)
{
- pagevec_release(pv);
+ folio_batch_release(fbatch);
}
-static int pool_refill(struct pagevec *pv, gfp_t gfp)
+static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
- while (pagevec_space(pv)) {
- struct page *p;
+ while (folio_batch_space(fbatch)) {
+ struct folio *folio;
- p = alloc_page(gfp);
- if (!p)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
return -ENOMEM;
- pagevec_add(pv, p);
+ folio_batch_add(fbatch, folio);
}
return 0;
}
-static int pool_init(struct pagevec *pv, gfp_t gfp)
+static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
int err;
- pagevec_init(pv);
+ folio_batch_init(fbatch);
- err = pool_refill(pv, gfp);
+ err = pool_refill(fbatch, gfp);
if (err)
- pool_fini(pv);
+ pool_fini(fbatch);
return err;
}
-static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
+static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
- struct page *p;
+ struct folio *folio;
- p = alloc_page(gfp);
- if (!p && pagevec_count(pv))
- p = pv->pages[--pv->nr];
+ folio = folio_alloc(gfp, 0);
+ if (!folio && folio_batch_count(fbatch))
+ folio = fbatch->folios[--fbatch->nr];
- return p ? page_address(p) : NULL;
+ return folio ? folio_address(folio) : NULL;
}
-static void pool_free(struct pagevec *pv, void *addr)
+static void pool_free(struct folio_batch *fbatch, void *addr)
{
- struct page *p = virt_to_page(addr);
+ struct folio *folio = virt_to_folio(addr);
- if (pagevec_space(pv))
- pagevec_add(pv, p);
+ if (folio_batch_space(fbatch))
+ folio_batch_add(fbatch, folio);
else
- __free_page(p);
+ folio_put(folio);
}
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct i915_vma_compress {
- struct pagevec pool;
+ struct folio_batch pool;
struct z_stream_s zstream;
void *tmp;
};
@@ -381,7 +381,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
struct i915_vma_compress {
- struct pagevec pool;
+ struct folio_batch pool;
};
static bool compress_init(struct i915_vma_compress *c)
@@ -649,6 +649,8 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
intel_device_info_print(&error->device_info, &error->runtime_info, &p);
+ intel_display_device_info_print(&error->display_device_info,
+ &error->display_runtime_info, &p);
intel_driver_caps_print(&error->driver_caps, &p);
}
@@ -1173,9 +1175,9 @@ i915_vma_coredump_create(const struct intel_gt *gt,
drm_clflush_pages(&page, 1);
- s = kmap(page);
+ s = kmap_local_page(page);
ret = compress_page(compress, s, dst, false);
- kunmap(page);
+ kunmap_local(s);
drm_clflush_pages(&page, 1);
@@ -1983,6 +1985,10 @@ static void capture_gen(struct i915_gpu_coredump *error)
memcpy(&error->runtime_info,
RUNTIME_INFO(i915),
sizeof(error->runtime_info));
+ memcpy(&error->display_device_info, DISPLAY_INFO(i915),
+ sizeof(error->display_device_info));
+ memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
+ sizeof(error->display_runtime_info));
error->driver_caps = i915->caps;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index a78c061ce26f..9f5971f5e980 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -14,6 +14,7 @@
#include <drm/drm_mm.h>
+#include "display/intel_display_device.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@@ -209,6 +210,8 @@ struct i915_gpu_coredump {
struct intel_device_info device_info;
struct intel_runtime_info runtime_info;
+ struct intel_display_device_info display_device_info;
+ struct intel_display_runtime_info display_runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 82fbabcdd7a5..1bfcfbe6e30b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -423,7 +423,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -511,7 +511,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
- void __iomem * const regs = dev_priv->uncore.regs;
+ void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
if (!intel_irqs_enabled(dev_priv))
@@ -561,7 +561,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
u32 gu_misc_iir;
@@ -619,7 +619,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
struct intel_gt *gt = to_gt(i915);
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
u32 gu_misc_iir;
@@ -711,7 +711,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- gen8_master_intr_disable(uncore->regs);
+ gen8_master_intr_disable(intel_uncore_regs(uncore));
gen8_gt_irq_reset(to_gt(dev_priv));
gen8_display_irq_reset(dev_priv);
@@ -727,7 +727,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
- gen11_master_intr_disable(dev_priv->uncore.regs);
+ gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
gen11_gt_irq_reset(gt);
gen11_display_irq_reset(dev_priv);
@@ -742,7 +742,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
struct intel_gt *gt;
unsigned int i;
- dg1_master_intr_disable(dev_priv->uncore.regs);
+ dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_reset(gt);
@@ -772,45 +772,9 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 display_mask, extra_mask;
-
- if (GRAPHICS_VER(dev_priv) >= 7) {
- display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
- extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
- DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
- DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
- DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
- DE_DP_A_HOTPLUG_IVB);
- } else {
- display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
- DE_PIPEA_CRC_DONE | DE_POISON);
- extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
- DE_PLANE_FLIP_DONE(PLANE_A) |
- DE_PLANE_FLIP_DONE(PLANE_B) |
- DE_DP_A_HOTPLUG);
- }
-
- if (IS_HASWELL(dev_priv)) {
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- display_mask |= DE_EDP_PSR_INT_HSW;
- }
-
- if (IS_IRONLAKE_M(dev_priv))
- extra_mask |= DE_PCU_EVENT;
-
- dev_priv->irq_mask = ~display_mask;
-
- ibx_irq_postinstall(dev_priv);
-
gen5_gt_irq_postinstall(to_gt(dev_priv));
- GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
- display_mask | extra_mask);
+ ilk_de_irq_postinstall(dev_priv);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -828,15 +792,10 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
-
gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(dev_priv);
- gen8_master_intr_enable(dev_priv->uncore.regs);
+ gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -845,15 +804,12 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
-
gen11_gt_irq_postinstall(gt);
gen11_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- gen11_master_intr_enable(uncore->regs);
+ gen11_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
@@ -869,18 +825,9 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
- if (HAS_DISPLAY(dev_priv)) {
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else
- icp_irq_postinstall(dev_priv);
+ dg1_de_irq_postinstall(dev_priv);
- gen8_de_irq_postinstall(dev_priv);
- intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
- }
-
- dg1_master_intr_enable(uncore->regs);
+ dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
@@ -1343,23 +1290,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- dev_priv->drm.vblank_disable_immediate = true;
-
- /* Most platforms treat the display irq block as an always-on
- * power domain. vlv/chv can disable it at runtime and need
- * special care to avoid writing any of the display block registers
- * outside of the power domain. We defer setting up the display irqs
- * in this case to the runtime pm.
- */
- dev_priv->display_irqs_enabled = true;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display_irqs_enabled = false;
-
- intel_hotplug_irq_init(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 3d7a5db9833b..fcacdc21643c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -38,9 +38,6 @@
#include "i915_reg.h"
#include "intel_pci_config.h"
-__diag_push();
-__diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
-
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
.__runtime.graphics.ip.ver = (x), \
@@ -84,7 +81,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
.__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN_DEFAULT_REGIONS \
- .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
+ .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
#define I830_FEATURES \
GEN(2), \
@@ -93,7 +90,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
.has_3d_pipeline = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .__runtime.platform_engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -108,7 +105,7 @@ __diag_ignore_all("-Woverride-init", "Allow overriding inherited members");
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .__runtime.platform_engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -140,7 +137,7 @@ static const struct intel_device_info i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.gpu_reset_clobbers_display = true, \
- .__runtime.platform_engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -203,7 +200,7 @@ static const struct intel_device_info pnv_m_info = {
#define GEN4_FEATURES \
GEN(4), \
.gpu_reset_clobbers_display = true, \
- .__runtime.platform_engine_mask = BIT(RCS0), \
+ .platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -231,7 +228,7 @@ static const struct intel_device_info i965gm_info = {
static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -239,13 +236,13 @@ static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
GEN(5), \
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -271,7 +268,7 @@ static const struct intel_device_info ilk_m_info = {
#define GEN6_FEATURES \
GEN(6), \
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -319,7 +316,7 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -387,7 +384,7 @@ static const struct intel_device_info vlv_info = {
.__runtime.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_REGIONS,
LEGACY_CACHELEVEL,
@@ -395,7 +392,7 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
.has_runtime_pm = 1
@@ -453,7 +450,7 @@ static const struct intel_device_info bdw_rsvd_info = {
static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -461,7 +458,7 @@ static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.is_lp = 1,
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
@@ -505,7 +502,7 @@ static const struct intel_device_info skl_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .__runtime.platform_engine_mask = \
+ .platform_engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
@@ -522,7 +519,7 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.has_runtime_pm = 1, \
@@ -568,7 +565,7 @@ static const struct intel_device_info kbl_gt2_info = {
static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -589,7 +586,7 @@ static const struct intel_device_info cfl_gt2_info = {
static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -622,21 +619,21 @@ static const struct intel_device_info cml_gt2_info = {
static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.__runtime.ppgtt_size = 36,
};
static const struct intel_device_info jsl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_JASPERLAKE),
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.__runtime.ppgtt_size = 36,
};
@@ -651,19 +648,19 @@ static const struct intel_device_info jsl_info = {
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
static const struct intel_device_info rkl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ROCKETLAKE),
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
};
#define DGFX_FEATURES \
- .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
+ .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_llc = 0, \
.has_pxp = 0, \
.has_snoop = 1, \
@@ -676,7 +673,7 @@ static const struct intel_device_info dg1_info = {
.__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
.require_force_probe = 1,
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
/* Wa_16011227922 */
@@ -686,7 +683,7 @@ static const struct intel_device_info dg1_info = {
static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.dma_mask_size = 39,
};
@@ -694,7 +691,7 @@ static const struct intel_device_info adl_s_info = {
static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.__runtime.ppgtt_size = 48,
.dma_mask_size = 39,
@@ -746,7 +743,7 @@ static const struct intel_device_info xehpsdv_info = {
PLATFORM(INTEL_XEHPSDV),
.has_64k_pages = 1,
.has_media_ratio_mode = 1,
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
@@ -766,7 +763,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_guc_deprivilege = 1, \
.has_heci_pxp = 1, \
.has_media_ratio_mode = 1, \
- .__runtime.platform_engine_mask = \
+ .platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
BIT(VCS0) | BIT(VCS2) | \
@@ -801,7 +798,7 @@ static const struct intel_device_info pvc_info = {
PLATFORM(INTEL_PONTEVECCHIO),
.has_flat_ccs = 0,
.max_pat_index = 7,
- .__runtime.platform_engine_mask =
+ .platform_engine_mask =
BIT(BCS0) |
BIT(VCS0) |
BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
@@ -838,16 +835,14 @@ static const struct intel_device_info mtl_info = {
.has_snoop = 1,
.max_pat_index = 4,
.has_pxp = 1,
- .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
- .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
+ .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+ .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
.require_force_probe = 1,
MTL_CACHELEVEL,
};
#undef PLATFORM
-__diag_pop();
-
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0a111b281578..04bc1f4a1115 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -868,8 +868,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
oa_report_id_clear(stream, report32);
oa_timestamp_clear(stream, report32);
} else {
+ u8 *oa_buf_end = stream->oa_buffer.vaddr +
+ OA_BUFFER_SIZE;
+ u32 part = oa_buf_end - (u8 *)report32;
+
/* Zero out the entire report */
- memset(report32, 0, report_size);
+ if (report_size <= part) {
+ memset(report32, 0, report_size);
+ } else {
+ memset(report32, 0, part);
+ memset(oa_buf_base, 0, report_size - part);
+ }
}
}
@@ -1310,7 +1319,7 @@ __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
u32 *cs, cmd;
cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
cs = intel_ring_begin(rq, 4);
@@ -4422,6 +4431,7 @@ static const struct i915_range mtl_oam_b_counters[] = {
static const struct i915_range xehp_oa_b_counters[] = {
{ .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
{ .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
+ {}
};
static const struct i915_range gen7_oa_mux_regs[] = {
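
The hunk in gen8_append_oa_reports() above splits the memset because a report may wrap past the end of the circular OA buffer. A self-contained sketch of the same split, with illustrative names in place of the stream's buffer fields:

#include <stdint.h>
#include <string.h>

/*
 * Zero a record that may wrap around the end of a circular buffer:
 * clear up to the buffer end first, then the remainder from the start.
 */
static void clear_wrapping_record(uint8_t *buf, size_t buf_size,
				  uint8_t *record, size_t record_size)
{
	size_t to_end = (size_t)(buf + buf_size - record);

	if (record_size <= to_end) {
		memset(record, 0, record_size);
	} else {
		memset(record, 0, to_end);
		memset(buf, 0, record_size - to_end);
	}
}
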
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 8ed7c39c2b30..aefad14ab27a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -941,8 +941,30 @@
#define HECI_H_GS1(base) _MMIO((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
-#define HECI_FWSTS5(base) _MMIO((base) + 0xc68)
-#define HECI_FWSTS5_HUC_AUTH_DONE (1 << 19)
+/*
+ * The FWSTS register values are FW defined and can be different between
+ * HECI1 and HECI2
+ */
+#define HECI_FWSTS1 0xc40
+#define HECI1_FWSTS1_CURRENT_STATE REG_GENMASK(3, 0)
+#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
+#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
+#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
+#define HECI_FWSTS2 0xc48
+#define HECI_FWSTS3 0xc60
+#define HECI_FWSTS4 0xc64
+#define HECI_FWSTS5 0xc68
+#define HECI1_FWSTS5_HUC_AUTH_DONE (1 << 19)
+#define HECI_FWSTS6 0xc6c
+
+/* the FWSTS regs are 1-based, so we use -base for index 0 to get an invalid reg */
+#define HECI_FWSTS(base, x) _MMIO((base) + _PICK(x, -(base), \
+ HECI_FWSTS1, \
+ HECI_FWSTS2, \
+ HECI_FWSTS3, \
+ HECI_FWSTS4, \
+ HECI_FWSTS5, \
+ HECI_FWSTS6))
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
#define GTT_CACHE_EN_ALL 0xF0007FFF
@@ -4421,8 +4443,10 @@
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
#define GEN8_DE_MISC_IER _MMIO(0x4446c)
-#define GEN8_DE_MISC_GSE (1 << 27)
-#define GEN8_DE_EDP_PSR (1 << 19)
+#define XELPDP_PMDEMAND_RSPTOUT_ERR REG_BIT(27)
+#define GEN8_DE_MISC_GSE REG_BIT(27)
+#define GEN8_DE_EDP_PSR REG_BIT(19)
+#define XELPDP_PMDEMAND_RSP REG_BIT(3)
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
@@ -4507,6 +4531,23 @@
#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
+#define XELPDP_INITIATE_PMDEMAND_REQUEST(dword) _MMIO(0x45230 + 4 * (dword))
+#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
+#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
+#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
+#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
+#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
+#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
+
+#define XELPDP_PMDEMAND_REQ_ENABLE REG_BIT(31)
+#define XELPDP_PMDEMAND_CDCLK_FREQ_MASK REG_GENMASK(30, 20)
+#define XELPDP_PMDEMAND_DDICLK_FREQ_MASK REG_GENMASK(18, 8)
+#define XELPDP_PMDEMAND_SCALERS_MASK REG_GENMASK(6, 4)
+#define XELPDP_PMDEMAND_PLLS_MASK REG_GENMASK(2, 0)
+
+#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
+#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
+
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT REG_BIT(25)
@@ -4666,6 +4707,9 @@
#define DCPR_SEND_RESP_IMM REG_BIT(25)
#define DCPR_CLEAR_MEMSTAT_DIS REG_BIT(24)
+#define XELPD_CHICKEN_DCPR_3 _MMIO(0x46438)
+#define DMD_RSP_TIMEOUT_DISABLE REG_BIT(19)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
@@ -4895,6 +4939,7 @@
#define SHPD_FILTER_CNT _MMIO(0xc4038)
#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+#define SHPD_FILTER_CNT_250 0x000F8
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
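
The 1-based HECI_FWSTS() lookup above works because index 0 selects -(base), so the sum collapses to offset 0, which is never a valid FWSTS register. A standalone sketch of that trick; the base value and helper are illustrative only, and the driver goes through _MMIO()/_PICK() rather than a plain array:

#include <stdint.h>
#include <stdio.h>

static uint32_t heci_fwsts_offset(uint32_t base, unsigned int x)
{
	/* index 0 cancels the base, yielding offset 0 == invalid register */
	const uint32_t fwsts[] = {
		(uint32_t)-base,
		0xc40,	/* FWSTS1 */
		0xc48,	/* FWSTS2 */
		0xc60,	/* FWSTS3 */
		0xc64,	/* FWSTS4 */
		0xc68,	/* FWSTS5 */
		0xc6c,	/* FWSTS6 */
	};

	return base + fwsts[x];
}

int main(void)
{
	uint32_t base = 0x116000;	/* hypothetical HECI1 base, for illustration */

	printf("FWSTS5 at 0x%x, index 0 at 0x%x\n",
	       heci_fwsts_offset(base, 5), heci_fwsts_offset(base, 0));
	return 0;
}
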
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 630a732aaecc..7c7da284990d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -290,7 +290,7 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
if (!i915_request_completed(rq)) {
if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
- schedule_work(&gt->watchdog.work);
+ queue_work(gt->i915->unordered_wq, &gt->watchdog.work);
} else {
i915_request_put(rq);
}
@@ -1220,7 +1220,7 @@ emit_semaphore_wait(struct i915_request *to,
/*
* If this or its dependents are waiting on an external fence
* that may fail catastrophically, then we want to avoid using
- * sempahores as they bypass the fence signaling metadata, and we
+ * semaphores as they bypass the fence signaling metadata, and we
* lose the fence->error propagation.
*/
if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
@@ -1353,7 +1353,7 @@ __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
mark_external(rq);
return i915_sw_fence_await_dma_fence(&rq->submit, fence,
- i915_fence_context_timeout(rq->engine->i915,
+ i915_fence_context_timeout(rq->i915,
fence->context),
I915_FENCE_GFP);
}
@@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
+ /*
+ * Users have to put a reference potentially got by
+ * __i915_active_fence_set() to the returned request
+ * when no longer needed
+ */
return to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
}
@@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
0);
}
+ /*
+	 * Users have to put the reference to prev, potentially acquired
+	 * by __i915_active_fence_set(), when it is no longer needed
+ */
return prev;
}
@@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = __i915_request_ensure_ordering(rq, timeline);
else
prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+ if (prev)
+ i915_request_put(prev);
/*
* Make sure that no request gazumped us - if it was allocated after
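Note: a minimal caller sketch of the reference-ownership rule the new comments above describe, using the names from the hunk (locking and error handling omitted):

	struct i915_request *prev;

	/* __i915_active_fence_set() hands back a referenced request (or NULL) */
	prev = to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence));
	if (prev) {
		/* ... order rq after prev ... */
		i915_request_put(prev);	/* drop the reference once no longer needed */
	}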
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f6f9228a1351..ce1cbee1b39d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -277,7 +277,7 @@ TRACE_EVENT(i915_request_queue,
),
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
+ __entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -304,7 +304,7 @@ DECLARE_EVENT_CLASS(i915_request,
),
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
+ __entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -353,7 +353,7 @@ TRACE_EVENT(i915_request_in,
),
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
+ __entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -382,7 +382,7 @@ TRACE_EVENT(i915_request_out,
),
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
+ __entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -623,7 +623,7 @@ TRACE_EVENT(i915_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
+ __entry->dev = rq->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ffb425ba591c..e52089564d79 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
+#include "gt/intel_tlb.h"
#include "i915_drv.h"
#include "i915_gem_evict.h"
@@ -74,14 +75,14 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
char buf[512];
if (!vma->node.stack) {
- drm_dbg(&to_i915(vma->obj->base.dev)->drm,
+ drm_dbg(vma->obj->base.dev,
"vma.node [%08llx + %08llx] %s: unknown owner\n",
vma->node.start, vma->node.size, reason);
return;
}
stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
- drm_dbg(&to_i915(vma->obj->base.dev)->drm,
+ drm_dbg(vma->obj->base.dev,
"vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}
@@ -805,7 +806,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* attempt to find space.
*/
if (size > end - 2 * guard) {
- drm_dbg(&to_i915(vma->obj->base.dev)->drm,
+ drm_dbg(vma->obj->base.dev,
"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
return -ENOSPC;
@@ -1339,6 +1340,12 @@ err_unpin:
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
{
+ struct intel_gt *gt;
+ int id;
+
+ if (!tlb)
+ return;
+
/*
* Before we release the pages that were bound by this vma, we
* must invalidate all the TLBs that may still have a reference
@@ -1347,7 +1354,9 @@ void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
* the most recent TLB invalidation seqno, and if we have not yet
* flushed the TLBs upon release, perform a full invalidation.
*/
- WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
+ for_each_gt(gt, vm->i915, id)
+ WRITE_ONCE(tlb[id],
+ intel_gt_next_invalidate_tlb_full(vm->gt));
}
static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
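Note: a sketch of how vma_invalidate_tlb() is called after this change — the caller now passes a per-GT seqno array rather than a pointer to a single u32; the I915_MAX_GT bound is used here only for illustration:

	u32 tlb[I915_MAX_GT] = {};

	/* record, for every GT, the seqno of the next full TLB invalidation */
	vma_invalidate_tlb(vma->vm, tlb);
	/* a NULL array is now tolerated and simply skipped */
	vma_invalidate_tlb(vma->vm, NULL);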
@@ -1629,6 +1638,26 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
return err;
}
+/**
+ * i915_ggtt_clear_scanout - Clear scanout flag for all of the object's GGTT VMAs
+ * @obj: i915 GEM object
+ * This function clears the scanout flags on the object's GGTT VMAs. The flags
+ * are set when the object is pinned for display use; frontbuffer tracking code
+ * is expected to call this to clear them all when the frontbuffer is about
+ * to be released.
+ */
+void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ i915_vma_clear_scanout(vma);
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ }
+ spin_unlock(&obj->vma.lock);
+}
+
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
/*
@@ -1908,7 +1937,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
if (flags & EXEC_OBJECT_WRITE) {
struct intel_frontbuffer *front;
- front = __intel_frontbuffer_get(obj);
+ front = i915_gem_object_get_frontbuffer(obj);
if (unlikely(front)) {
if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
i915_active_add_request(&front->write, rq);
@@ -1994,7 +2023,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
if (async)
unbind_fence = i915_vma_resource_unbind(vma_res,
- &vma->obj->mm.tlb);
+ vma->obj->mm.tlb);
else
unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
@@ -2011,7 +2040,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
dma_fence_put(unbind_fence);
unbind_fence = NULL;
}
- vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
+ vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 9a9729205d5b..e356dfb883d3 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -418,6 +418,11 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
+static inline int i915_vma_fence_id(const struct i915_vma *vma)
+{
+ return vma->fence ? vma->fence->id : -1;
+}
+
void i915_vma_parked(struct intel_gt *gt);
static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
@@ -435,6 +440,8 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma)
clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}
+void i915_ggtt_clear_scanout(struct drm_i915_gem_object *obj);
+
#define for_each_until(cond) if (cond) break; else
/**
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index a27600bc5976..81a4d32734e9 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -456,12 +456,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
/* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0))
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, 0, STEP_C0))
intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6,
0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
- if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0))
+ if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, 0, STEP_C0))
intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1,
0, GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
@@ -559,9 +559,20 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
static void hsw_init_clock_gating(struct drm_i915_private *i915)
{
+ enum pipe pipe;
+
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS);
+ /* WaPsrDPAMaskVBlankInSRD:hsw */
+ intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
+
+ for_each_pipe(i915, pipe) {
+ /* WaPsrDPRSUnmaskVBlankInSRD:hsw */
+ intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
+ 0, HSW_UNMASK_VBL_TO_REGS_IN_SRD);
+ }
+
/* This is required by WaCatErrorRejectionIssue:hsw */
intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 2f79d232b04a..ea0ec6174ce5 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -27,9 +27,7 @@
#include <drm/drm_print.h>
#include <drm/i915_pciids.h>
-#include "display/intel_cdclk.h"
-#include "display/intel_de.h"
-#include "display/intel_display.h"
+#include "display/intel_display_device.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -95,9 +93,6 @@ void intel_device_info_print(const struct intel_device_info *info,
const struct intel_runtime_info *runtime,
struct drm_printer *p)
{
- const struct intel_display_runtime_info *display_runtime =
- &info->display->__runtime_defaults;
-
if (runtime->graphics.ip.rel)
drm_printf(p, "graphics version: %u.%02u\n",
runtime->graphics.ip.ver,
@@ -114,21 +109,13 @@ void intel_device_info_print(const struct intel_device_info *info,
drm_printf(p, "media version: %u\n",
runtime->media.ip.ver);
- if (display_runtime->ip.rel)
- drm_printf(p, "display version: %u.%02u\n",
- display_runtime->ip.ver,
- display_runtime->ip.rel);
- else
- drm_printf(p, "display version: %u\n",
- display_runtime->ip.ver);
-
drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step));
drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step));
drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step));
drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step));
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions);
+ drm_printf(p, "memory-regions: 0x%x\n", info->memory_regions);
drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
@@ -140,15 +127,6 @@ void intel_device_info_print(const struct intel_device_info *info,
#undef PRINT_FLAG
drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
-
-#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display->name))
- DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
-
- drm_printf(p, "has_hdcp: %s\n", str_yes_no(display_runtime->has_hdcp));
- drm_printf(p, "has_dmc: %s\n", str_yes_no(display_runtime->has_dmc));
- drm_printf(p, "has_dsc: %s\n", str_yes_no(display_runtime->has_dsc));
-
drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
@@ -262,15 +240,19 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
if (find_devid(devid, subplatform_ult_ids,
ARRAY_SIZE(subplatform_ult_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ULT);
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D);
} else if (find_devid(devid, subplatform_ulx_ids,
ARRAY_SIZE(subplatform_ulx_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ULX);
if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
/* ULX machines are also considered ULT. */
mask |= BIT(INTEL_SUBPLATFORM_ULT);
+ DISPLAY_RUNTIME_INFO(i915)->port_mask &= ~BIT(PORT_D);
}
} else if (find_devid(devid, subplatform_portf_ids,
ARRAY_SIZE(subplatform_portf_ids))) {
+ DISPLAY_RUNTIME_INFO(i915)->port_mask |= BIT(PORT_F);
mask = BIT(INTEL_SUBPLATFORM_PORTF);
} else if (find_devid(devid, subplatform_uy_ids,
ARRAY_SIZE(subplatform_uy_ids))) {
@@ -382,13 +364,6 @@ void intel_device_info_runtime_init_early(struct drm_i915_private *i915)
intel_device_info_subplatform_init(i915);
}
-/* FIXME: Remove this, and make device info a const pointer to rodata. */
-static struct intel_device_info *
-mkwrite_device_info(struct drm_i915_private *i915)
-{
- return (struct intel_device_info *)INTEL_INFO(i915);
-}
-
static const struct intel_display_device_info no_display = {};
/**
@@ -409,127 +384,24 @@ static const struct intel_display_device_info no_display = {};
*/
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
- struct intel_display_runtime_info *display_runtime =
- DISPLAY_RUNTIME_INFO(dev_priv);
- enum pipe pipe;
-
- /* Wa_14011765242: adl-s A0,A1 */
- if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A2))
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_scalers[pipe] = 0;
- else if (DISPLAY_VER(dev_priv) >= 11) {
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_scalers[pipe] = 2;
- } else if (DISPLAY_VER(dev_priv) >= 9) {
- display_runtime->num_scalers[PIPE_A] = 2;
- display_runtime->num_scalers[PIPE_B] = 2;
- display_runtime->num_scalers[PIPE_C] = 1;
- }
- BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
+ if (HAS_DISPLAY(dev_priv))
+ intel_display_device_info_runtime_init(dev_priv);
- if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_sprites[pipe] = 4;
- else if (DISPLAY_VER(dev_priv) >= 11)
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_sprites[pipe] = 6;
- else if (DISPLAY_VER(dev_priv) == 10)
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_sprites[pipe] = 3;
- else if (IS_BROXTON(dev_priv)) {
- /*
- * Skylake and Broxton currently don't expose the topmost plane as its
- * use is exclusive with the legacy cursor and we only want to expose
- * one of those, not both. Until we can safely expose the topmost plane
- * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
- * we don't expose the topmost plane at all to prevent ABI breakage
- * down the line.
- */
-
- display_runtime->num_sprites[PIPE_A] = 2;
- display_runtime->num_sprites[PIPE_B] = 2;
- display_runtime->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_sprites[pipe] = 2;
- } else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) {
- for_each_pipe(dev_priv, pipe)
- display_runtime->num_sprites[pipe] = 1;
- }
-
- if (HAS_DISPLAY(dev_priv) &&
- (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) &&
- !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) {
- drm_info(&dev_priv->drm, "Display not present, disabling\n");
-
- display_runtime->pipe_mask = 0;
+ /* Display may have been disabled by runtime init */
+ if (!HAS_DISPLAY(dev_priv)) {
+ dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
+ DRIVER_ATOMIC);
+ dev_priv->display.info.__device_info = &no_display;
}
- if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) &&
- HAS_PCH_SPLIT(dev_priv)) {
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);
-
- /*
- * SFUSE_STRAP is supposed to have a bit signalling the display
- * is fused off. Unfortunately it seems that, at least in
- * certain cases, fused off display means that PCH display
- * reads don't land anywhere. In that case, we read 0s.
- *
- * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
- * should be set when taking over after the firmware.
- */
- if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
- sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (HAS_PCH_CPT(dev_priv) &&
- !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- drm_info(&dev_priv->drm,
- "Display fused off, disabling\n");
- display_runtime->pipe_mask = 0;
- } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- drm_info(&dev_priv->drm, "PipeC fused off\n");
- display_runtime->pipe_mask &= ~BIT(PIPE_C);
- display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
- }
- } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
- u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
-
- if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
- display_runtime->pipe_mask &= ~BIT(PIPE_A);
- display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
- display_runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
- }
- if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
- display_runtime->pipe_mask &= ~BIT(PIPE_B);
- display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
- }
- if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
- display_runtime->pipe_mask &= ~BIT(PIPE_C);
- display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
- }
-
- if (DISPLAY_VER(dev_priv) >= 12 &&
- (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
- display_runtime->pipe_mask &= ~BIT(PIPE_D);
- display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
- }
-
- if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
- display_runtime->has_hdcp = 0;
-
- if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
- display_runtime->fbc_mask = 0;
-
- if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
- display_runtime->has_dmc = 0;
+ /* Disable nuclear pageflip by default on pre-g4x */
+ if (!dev_priv->params.nuclear_pageflip &&
+ DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
- if (IS_DISPLAY_VER(dev_priv, 10, 12) &&
- (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
- display_runtime->has_dsc = 0;
- }
+ BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
@@ -540,24 +412,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
- if (!HAS_DISPLAY(dev_priv)) {
- dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
- DRIVER_ATOMIC);
- info->display = &no_display;
-
- display_runtime->cpu_transcoder_mask = 0;
- memset(display_runtime->num_sprites, 0, sizeof(display_runtime->num_sprites));
- memset(display_runtime->num_scalers, 0, sizeof(display_runtime->num_scalers));
- display_runtime->fbc_mask = 0;
- display_runtime->has_hdcp = false;
- display_runtime->has_dmc = false;
- display_runtime->has_dsc = false;
- }
-
- /* Disable nuclear pageflip by default on pre-g4x */
- if (!dev_priv->params.nuclear_pageflip &&
- DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
}
/*
@@ -569,26 +423,24 @@ void intel_device_info_driver_create(struct drm_i915_private *i915,
u16 device_id,
const struct intel_device_info *match_info)
{
- struct intel_device_info *info;
struct intel_runtime_info *runtime;
u16 ver, rel, step;
- /* Setup the write-once "constant" device info */
- info = mkwrite_device_info(i915);
- memcpy(info, match_info, sizeof(*info));
+ /* Setup INTEL_INFO() */
+ i915->__info = match_info;
/* Initialize initial runtime info from static const data and pdev. */
runtime = RUNTIME_INFO(i915);
memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
/* Probe display support */
- info->display = intel_display_device_probe(i915, info->has_gmd_id,
- &ver, &rel, &step);
+ i915->display.info.__device_info = intel_display_device_probe(i915, HAS_GMD_ID(i915),
+ &ver, &rel, &step);
memcpy(DISPLAY_RUNTIME_INFO(i915),
&DISPLAY_INFO(i915)->__runtime_defaults,
sizeof(*DISPLAY_RUNTIME_INFO(i915)));
- if (info->has_gmd_id) {
+ if (HAS_GMD_ID(i915)) {
DISPLAY_RUNTIME_INFO(i915)->ip.ver = ver;
DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 069291b3bd37..dbfe6443457b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -29,8 +29,6 @@
#include "intel_step.h"
-#include "display/intel_display_device.h"
-
#include "gt/intel_engine_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_sseu.h"
@@ -212,8 +210,6 @@ struct intel_runtime_info {
u16 device_id;
- intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
-
u32 rawclk_freq;
struct intel_step_info step;
@@ -223,8 +219,6 @@ struct intel_runtime_info {
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
- u32 memory_regions; /* regions supported by the HW */
-
bool has_pooled_eu;
};
@@ -237,12 +231,13 @@ struct intel_device_info {
u8 gt; /* GT number, 0 if undefined */
+ intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
+ u32 memory_regions; /* regions supported by the HW */
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
- const struct intel_display_device_info *display;
-
/*
* Initial runtime info. Do not access outside of i915_driver_create().
*/
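Note: a simplified sketch of the resulting probe-time flow once the device info becomes a const rodata pointer and the HW masks move into struct intel_device_info (field and helper names as in the hunks above):

	/* intel_device_info_driver_create(), simplified */
	i915->__info = match_info;			/* INTEL_INFO() now points at rodata */
	memcpy(RUNTIME_INFO(i915), &INTEL_INFO(i915)->__runtime,
	       sizeof(*RUNTIME_INFO(i915)));
	/* engine/memory-region masks are now read from INTEL_INFO(), not RUNTIME_INFO() */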
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index cf5122299b6b..6d8e5e5c0cba 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -658,5 +658,5 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
init_intel_runtime_pm_wakeref(rpm);
INIT_LIST_HEAD(&rpm->lmem_userfault_list);
spin_lock_init(&rpm->lmem_userfault_lock);
- intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
+ intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);
}
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 8a9ff6227e53..c02a6f156a00 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -192,16 +192,16 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_XEHPSDV(i915)) {
revids = xehpsdv_revids;
size = ARRAY_SIZE(xehpsdv_revids);
- } else if (IS_ADLP_N(i915)) {
+ } else if (IS_ALDERLAKE_P_N(i915)) {
revids = adlp_n_revids;
size = ARRAY_SIZE(adlp_n_revids);
- } else if (IS_ADLP_RPLP(i915)) {
+ } else if (IS_RAPTORLAKE_P(i915)) {
revids = adlp_rplp_revids;
size = ARRAY_SIZE(adlp_rplp_revids);
} else if (IS_ALDERLAKE_P(i915)) {
revids = adlp_revids;
size = ARRAY_SIZE(adlp_revids);
- } else if (IS_ADLS_RPLS(i915)) {
+ } else if (IS_RAPTORLAKE_S(i915)) {
revids = adls_rpls_revids;
size = ARRAY_SIZE(adls_rpls_revids);
} else if (IS_ALDERLAKE_S(i915)) {
@@ -213,13 +213,13 @@ void intel_step_init(struct drm_i915_private *i915)
} else if (IS_ROCKETLAKE(i915)) {
revids = rkl_revids;
size = ARRAY_SIZE(rkl_revids);
- } else if (IS_TGL_UY(i915)) {
+ } else if (IS_TIGERLAKE_UY(i915)) {
revids = tgl_uy_revids;
size = ARRAY_SIZE(tgl_uy_revids);
} else if (IS_TIGERLAKE(i915)) {
revids = tgl_revids;
size = ARRAY_SIZE(tgl_revids);
- } else if (IS_JSL_EHL(i915)) {
+ } else if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
revids = jsl_ehl_revids;
size = ARRAY_SIZE(jsl_ehl_revids);
} else if (IS_ICELAKE(i915)) {
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 796ebfe6c550..dfefad5a5fec 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1925,25 +1925,31 @@ __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
i915_mmio_reg_offset(reg));
}
-static inline void
-unclaimed_reg_debug(struct intel_uncore *uncore,
- const i915_reg_t reg,
- const bool read,
- const bool before)
+static inline bool __must_check
+unclaimed_reg_debug_header(struct intel_uncore *uncore,
+ const i915_reg_t reg, const bool read)
{
if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
- return;
+ return false;
/* interrupts are disabled and re-enabled around uncore->lock usage */
lockdep_assert_held(&uncore->lock);
- if (before) {
- spin_lock(&uncore->debug->lock);
- __unclaimed_previous_reg_debug(uncore, reg, read);
- } else {
- __unclaimed_reg_debug(uncore, reg, read);
- spin_unlock(&uncore->debug->lock);
- }
+ spin_lock(&uncore->debug->lock);
+ __unclaimed_previous_reg_debug(uncore, reg, read);
+
+ return true;
+}
+
+static inline void
+unclaimed_reg_debug_footer(struct intel_uncore *uncore,
+ const i915_reg_t reg, const bool read)
+{
+ /* interrupts are disabled and re-enabled around uncore->lock usage */
+ lockdep_assert_held(&uncore->lock);
+
+ __unclaimed_reg_debug(uncore, reg, read);
+ spin_unlock(&uncore->debug->lock);
}
#define __vgpu_read(x) \
@@ -2001,13 +2007,15 @@ __gen2_read(64)
#define GEN6_READ_HEADER(x) \
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
+ bool unclaimed_reg_debug; \
u##x val = 0; \
assert_rpm_wakelock_held(uncore->rpm); \
spin_lock_irqsave(&uncore->lock, irqflags); \
- unclaimed_reg_debug(uncore, reg, true, true)
+ unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
#define GEN6_READ_FOOTER \
- unclaimed_reg_debug(uncore, reg, true, false); \
+ if (unclaimed_reg_debug) \
+ unclaimed_reg_debug_footer(uncore, reg, true); \
spin_unlock_irqrestore(&uncore->lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
@@ -2105,13 +2113,15 @@ __gen2_write(32)
#define GEN6_WRITE_HEADER \
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
+ bool unclaimed_reg_debug; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(uncore->rpm); \
spin_lock_irqsave(&uncore->lock, irqflags); \
- unclaimed_reg_debug(uncore, reg, false, true)
+ unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
#define GEN6_WRITE_FOOTER \
- unclaimed_reg_debug(uncore, reg, false, false); \
+ if (unclaimed_reg_debug) \
+ unclaimed_reg_debug_footer(uncore, reg, false); \
spin_unlock_irqrestore(&uncore->lock, irqflags)
#define __gen6_write(x) \
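Note: an illustrative expansion of the new header/footer pairing used by the GEN6 MMIO macros above (a sketch, not literal macro output; offset, irqflags and val come from the read header):

	bool debug;

	spin_lock_irqsave(&uncore->lock, irqflags);
	debug = unclaimed_reg_debug_header(uncore, reg, true);	/* takes debug->lock only if enabled */
	val = readl(intel_uncore_regs(uncore) + offset);
	if (debug)
		unclaimed_reg_debug_footer(uncore, reg, true);	/* checks and drops debug->lock */
	spin_unlock_irqrestore(&uncore->lock, irqflags);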
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 9ea1f4864a3a..f419c311a0de 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -496,6 +496,11 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
+static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
+{
+ return uncore->regs;
+}
+
/*
* The raw_reg_{read,write} macros are intended as a micro-optimization for
* interrupt handlers so that the pointer indirection on uncore->regs can
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index dfd87d082218..718f2f1b6174 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -8,17 +8,18 @@
#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
+#include "i915_drv.h"
static void rpm_get(struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(wf->rpm);
+ wf->wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
}
static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(wf->rpm, wakeref);
+ intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
INTEL_WAKEREF_BUG_ON(!wakeref);
}
@@ -74,7 +75,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
/* Assume we are not in process context and so cannot sleep. */
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
- mod_delayed_work(system_wq, &wf->work,
+ mod_delayed_work(wf->i915->unordered_wq, &wf->work,
FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
return;
}
@@ -94,11 +95,11 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
}
void __intel_wakeref_init(struct intel_wakeref *wf,
- struct intel_runtime_pm *rpm,
+ struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
struct intel_wakeref_lockclass *key)
{
- wf->rpm = rpm;
+ wf->i915 = i915;
wf->ops = ops;
__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
@@ -137,17 +138,17 @@ static void wakeref_auto_timeout(struct timer_list *t)
wakeref = fetch_and_zero(&wf->wakeref);
spin_unlock_irqrestore(&wf->lock, flags);
- intel_runtime_pm_put(wf->rpm, wakeref);
+ intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
- struct intel_runtime_pm *rpm)
+ struct drm_i915_private *i915)
{
spin_lock_init(&wf->lock);
timer_setup(&wf->timer, wakeref_auto_timeout, 0);
refcount_set(&wf->count, 0);
wf->wakeref = 0;
- wf->rpm = rpm;
+ wf->i915 = i915;
}
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
@@ -161,13 +162,14 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
}
/* Our mission is that we only extend an already active wakeref */
- assert_rpm_wakelock_held(wf->rpm);
+ assert_rpm_wakelock_held(&wf->i915->runtime_pm);
if (!refcount_inc_not_zero(&wf->count)) {
spin_lock_irqsave(&wf->lock, flags);
if (!refcount_inc_not_zero(&wf->count)) {
INTEL_WAKEREF_BUG_ON(wf->wakeref);
- wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
+ wf->wakeref =
+ intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
refcount_set(&wf->count, 1);
}
spin_unlock_irqrestore(&wf->lock, flags);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 0b6b4852ab23..ec881b097368 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -39,7 +39,7 @@ struct intel_wakeref {
intel_wakeref_t wakeref;
- struct intel_runtime_pm *rpm;
+ struct drm_i915_private *i915;
const struct intel_wakeref_ops *ops;
struct delayed_work work;
@@ -51,13 +51,13 @@ struct intel_wakeref_lockclass {
};
void __intel_wakeref_init(struct intel_wakeref *wf,
- struct intel_runtime_pm *rpm,
+ struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
struct intel_wakeref_lockclass *key);
-#define intel_wakeref_init(wf, rpm, ops) do { \
+#define intel_wakeref_init(wf, i915, ops) do { \
static struct intel_wakeref_lockclass __key; \
\
- __intel_wakeref_init((wf), (rpm), (ops), &__key); \
+ __intel_wakeref_init((wf), (i915), (ops), &__key); \
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
@@ -262,7 +262,7 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
struct intel_wakeref_auto {
- struct intel_runtime_pm *rpm;
+ struct drm_i915_private *i915;
struct timer_list timer;
intel_wakeref_t wakeref;
spinlock_t lock;
@@ -287,7 +287,7 @@ struct intel_wakeref_auto {
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
- struct intel_runtime_pm *rpm);
+ struct drm_i915_private *i915);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
#endif /* INTEL_WAKEREF_H */
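Note: a sketch of the converted initialisation calls once intel_wakeref stores the i915 pointer instead of a runtime_pm pointer; gt->wakeref and wf_ops stand in for a real wakeref and its ops:

	intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
	intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);	/* matches the intel_runtime_pm.c hunk above */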
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index bb2e15329f34..38ec754d0ec8 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -162,8 +162,8 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9
* for HuC authentication. For now, its limited to DG2.
*/
if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
- intel_huc_is_loaded_by_gsc(&i915->gt0.uc.huc) && intel_uc_uses_huc(&i915->gt0.uc))
- return &i915->gt0;
+ intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
+ return to_gt(i915);
return NULL;
}
@@ -188,8 +188,8 @@ static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_p
* Else we rely on mei-pxp module but only on legacy platforms
* prior to having separate media GTs and has a valid VDBOX.
*/
- if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(&i915->gt0))
- return &i915->gt0;
+ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
+ return to_gt(i915);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
index f13890ec7db1..2a600184a077 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_internal.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_fw.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
@@ -197,7 +198,7 @@ bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
* are out of order) will suffice.
*/
if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) &&
- intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc))
+ intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc, true))
return true;
return false;
@@ -336,7 +337,7 @@ gsccs_create_buffer(struct intel_gt *gt,
}
/* return a virtual pointer */
- *map = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ *map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
if (IS_ERR(*map)) {
drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname);
err = PTR_ERR(*map);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 1ce07d7e8769..80bb00189865 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -11,6 +11,7 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "gt/intel_gt.h"
#include "intel_pxp.h"
#include "intel_pxp_cmd_interface_42.h"
@@ -245,7 +246,9 @@ static int alloc_streaming_command(struct intel_pxp *pxp)
}
/* map the lmem into the virtual memory pointer */
- cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ cmd = i915_gem_object_pin_map_unlocked(obj,
+ intel_gt_coherent_map_type(pxp->ctrl_gt,
+ obj, true));
if (IS_ERR(cmd)) {
drm_err(&i915->drm, "Failed to map gsc message page!\n");
err = PTR_ERR(cmd);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 36940ef10108..5c397a2df70e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -391,7 +391,7 @@ static void close_object_list(struct list_head *objects,
struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj, *on;
- int ignored;
+ int __maybe_unused ignored;
list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index d4608b220123..403134a7acec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -168,7 +168,7 @@ static int write_timestamp(struct i915_request *rq, int slot)
return PTR_ERR(cs);
len = 5;
- if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->i915) >= 8)
len++;
*cs++ = GFX_OP_PIPE_CONTROL(len);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 39da0fb0d6d2..ee79e0809a6d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -24,6 +24,8 @@
#include <linux/random.h>
#include "gt/intel_gt_pm.h"
+#include "gt/uc/intel_gsc_fw.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_selftest.h"
@@ -127,6 +129,31 @@ static void set_default_test_all(struct selftest *st, unsigned int count)
st[i].enabled = true;
}
+static bool
+__gsc_proxy_init_progressing(struct intel_gsc_uc *gsc)
+{
+ return intel_gsc_uc_fw_proxy_get_status(gsc) == -EAGAIN;
+}
+
+static void
+__wait_gsc_proxy_completed(struct drm_i915_private *i915)
+{
+ bool need_to_wait = (IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY) &&
+ i915->media_gt &&
+ HAS_ENGINE(i915->media_gt, GSC0) &&
+ intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw));
+ /*
+	 * The GSC proxy component depends on the kernel component driver load
+	 * ordering, and in corner cases (the first boot after an IFWI flash) the
+	 * init-completion firmware flows take longer.
+ */
+ unsigned long timeout_ms = 8000;
+
+ if (need_to_wait && wait_for(!__gsc_proxy_init_progressing(&i915->media_gt->uc.gsc),
+ timeout_ms))
+ pr_warn(DRIVER_NAME "Timed out waiting for gsc_proxy_completion!\n");
+}
+
static int __run_selftests(const char *name,
struct selftest *st,
unsigned int count,
@@ -206,6 +233,8 @@ int i915_live_selftests(struct pci_dev *pdev)
if (!i915_selftest.live)
return 0;
+ __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+
err = run_selftests(live, pdev_to_i915(pdev));
if (err) {
i915_selftest.live = err;
@@ -227,6 +256,8 @@ int i915_perf_selftests(struct pci_dev *pdev)
if (!i915_selftest.perf)
return 0;
+ __wait_gsc_proxy_completed(pdev_to_i915(pdev));
+
err = run_selftests(perf, pdev_to_i915(pdev));
if (err) {
i915_selftest.perf = err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index daa985e5a19b..8f5ce71fa453 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -523,12 +523,19 @@ static void task_ipc(struct work_struct *work)
static int test_ipc(void *arg)
{
struct task_ipc ipc;
+ struct workqueue_struct *wq;
int ret = 0;
+ wq = alloc_workqueue("i1915-selftest", 0, 0);
+ if (wq == NULL)
+ return -ENOMEM;
+
/* Test use of i915_sw_fence as an interprocess signaling mechanism */
ipc.in = alloc_fence();
- if (!ipc.in)
- return -ENOMEM;
+ if (!ipc.in) {
+ ret = -ENOMEM;
+ goto err_work;
+ }
ipc.out = alloc_fence();
if (!ipc.out) {
ret = -ENOMEM;
@@ -540,7 +547,7 @@ static int test_ipc(void *arg)
ipc.value = 0;
INIT_WORK_ONSTACK(&ipc.work, task_ipc);
- schedule_work(&ipc.work);
+ queue_work(wq, &ipc.work);
wait_for_completion(&ipc.started);
@@ -563,6 +570,9 @@ static int test_ipc(void *arg)
free_fence(ipc.out);
err_in:
free_fence(ipc.in);
+err_work:
+ destroy_workqueue(wq);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 618d9386d554..0f064930ef11 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -97,7 +97,7 @@ int igt_spinner_pin(struct igt_spinner *spin,
if (!spin->batch) {
unsigned int mode;
- mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
+ mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -159,15 +159,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- if (GRAPHICS_VER(rq->engine->i915) >= 8) {
+ if (GRAPHICS_VER(rq->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
- } else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
+ } else if (GRAPHICS_VER(rq->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
- } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
+ } else if (GRAPHICS_VER(rq->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = hws_address(hws, rq);
@@ -179,11 +179,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = arbitration_command;
- if (GRAPHICS_VER(rq->engine->i915) >= 8)
+ if (GRAPHICS_VER(rq->i915) >= 8)
*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
- else if (IS_HASWELL(rq->engine->i915))
+ else if (IS_HASWELL(rq->i915))
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
- else if (GRAPHICS_VER(rq->engine->i915) >= 6)
+ else if (GRAPHICS_VER(rq->i915) >= 6)
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
@@ -201,7 +201,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
}
flags = 0;
- if (GRAPHICS_VER(rq->engine->i915) <= 5)
+ if (GRAPHICS_VER(rq->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e4281508d580..03ea75cd84dd 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -210,7 +210,7 @@ static int live_forcewake_ops(void *arg)
for_each_engine(engine, gt, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
- u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+ u32 __iomem *reg = intel_uncore_regs(uncore) + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
u32 val;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0eda8b4ee17f..da0b269606c5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -69,6 +69,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_drain_workqueue(i915);
mock_fini_ggtt(to_gt(i915)->ggtt);
+ destroy_workqueue(i915->unordered_wq);
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
@@ -113,19 +114,35 @@ static struct dev_pm_domain pm_domain = {
static void mock_gt_probe(struct drm_i915_private *i915)
{
- i915->gt[0] = &i915->gt0;
+ i915->gt[0] = to_gt(i915);
i915->gt[0]->name = "Mock GT";
}
+static const struct intel_device_info mock_info = {
+ .__runtime.graphics.ip.ver = -1,
+ .__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K |
+ I915_GTT_PAGE_SIZE_64K |
+ I915_GTT_PAGE_SIZE_2M),
+ .memory_regions = REGION_SMEM,
+ .platform_engine_mask = BIT(0),
+
+ /* simply use legacy cache level for mock device */
+ .max_pat_index = 3,
+ .cachelevel_to_pat = {
+ [I915_CACHE_NONE] = 0,
+ [I915_CACHE_LLC] = 1,
+ [I915_CACHE_L3_LLC] = 2,
+ [I915_CACHE_WT] = 3,
+ },
+};
+
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
- struct intel_device_info *i915_info;
struct pci_dev *pdev;
- unsigned int i;
int ret;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -158,15 +175,18 @@ struct drm_i915_private *mock_gem_device(void)
pci_set_drvdata(pdev, i915);
+ /* Device parameters start as a copy of module parameters. */
+ i915_params_copy(&i915->params, &i915_modparams);
+
+ /* Set up device info and initial runtime info. */
+ intel_device_info_driver_create(i915, pdev->device, &mock_info);
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
WARN_ON(pm_runtime_get_sync(&pdev->dev));
-
- i915_params_copy(&i915->params, &i915_modparams);
-
intel_runtime_pm_init_early(&i915->runtime_pm);
/* wakeref tracking has significant overhead */
i915->runtime_pm.no_wakeref_tracking = true;
@@ -174,21 +194,6 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- RUNTIME_INFO(i915)->graphics.ip.ver = -1;
-
- RUNTIME_INFO(i915)->page_sizes =
- I915_GTT_PAGE_SIZE_4K |
- I915_GTT_PAGE_SIZE_64K |
- I915_GTT_PAGE_SIZE_2M;
-
- RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
-
- /* simply use legacy cache level for mock device */
- i915_info = (struct intel_device_info *)INTEL_INFO(i915);
- i915_info->max_pat_index = 3;
- for (i = 0; i < I915_MAX_CACHE_LEVEL; i++)
- i915_info->cachelevel_to_pat[i] = i;
-
intel_memory_regions_hw_probe(i915);
spin_lock_init(&i915->gpu_error.lock);
@@ -208,6 +213,10 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->wq)
goto err_drv;
+ i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
+ if (!i915->unordered_wq)
+ goto err_wq;
+
mock_init_contexts(i915);
/* allocate the ggtt */
@@ -218,7 +227,6 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(to_gt(i915));
to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
- RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
@@ -239,6 +247,8 @@ struct drm_i915_private *mock_gem_device(void)
err_context:
intel_gt_driver_remove(to_gt(i915));
err_unlock:
+ destroy_workqueue(i915->unordered_wq);
+err_wq:
destroy_workqueue(i915->wq);
err_drv:
intel_region_ttm_device_fini(i915);
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 9f0651d48d41..15492b69f698 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -704,7 +704,7 @@ void intel_dram_edram_detect(struct drm_i915_private *i915)
if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
return;
- edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+ edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP);
/* NB: We can't write IDICR yet because we don't have gt funcs set up */
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 6d0204942f7a..49c7fb16e934 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -47,11 +47,9 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
+ if (IS_ENABLED(CONFIG_PNP) && mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
return 0;
-#endif
/* Get some space for it */
i915->gmch.mch_res.name = "i915 MCHBAR";
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index ba9843cb1b13..19a8f27c404e 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -32,21 +32,21 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
/* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
@@ -54,7 +54,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm,
!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
drm_WARN_ON(&dev_priv->drm,
- !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
/* WPT is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
@@ -115,7 +115,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+ IS_ELKHARTLAKE(dev_priv)));
/* MCC is TGP compatible */
return PCH_TGP;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
@@ -127,7 +128,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
+ IS_ELKHARTLAKE(dev_priv)));
/* JSP is ICP compatible */
return PCH_ICP;
case INTEL_PCH_ADP_DEVICE_ID_TYPE:
@@ -177,7 +179,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_JSL_EHL(dev_priv))
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
@@ -186,7 +188,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index 5d1779ab65c0..4f3af0dfb344 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -4,8 +4,9 @@
*/
#include <linux/clk.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <drm/drm_bridge_connector.h>
@@ -198,7 +199,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
if (!dcss->of_port) {
- dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name);
+ dev_err(dev, "no port@0 node in %pOF\n", dev->of_node);
ret = -ENODEV;
goto clks_err;
}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index 4f2291610139..c68b0d93ae9e 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -66,6 +66,7 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
mdrv->kms = dcss_kms_attach(mdrv->dcss);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
+ dev_err_probe(dev, err, "Failed to initialize KMS\n");
goto dcss_shutoff;
}
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index 5f26090b0c98..89585b31b985 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
@@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
sig_cfg.mode.hactive, new_hactive);
- sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
+ sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
sig_cfg.mode.hactive = new_hactive;
}
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index 80142d9a4a55..dade8b59feae 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -618,6 +618,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
width = ipu_src_rect_width(new_state);
else
width = drm_rect_width(&new_state->src) >> 16;
+ height = drm_rect_height(&new_state->src) >> 16;
eba = drm_plane_state_to_eba(new_state, 0);
@@ -628,9 +629,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
if (ipu_state->use_pre) {
axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
- drm_rect_height(&new_state->src) >> 16,
- fb->pitches[0], fb->format->format,
- fb->modifier, &eba);
+ height, fb->pitches[0],
+ fb->format->format, fb->modifier,
+ &eba);
}
if (!old_state->fb ||
@@ -684,7 +685,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
- height = drm_rect_height(&new_state->src) >> 16;
info = drm_format_info(fb->format->format);
ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
&burstsize, &num_bursts);
@@ -747,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
ipu_cpmem_zero(ipu_plane->alpha_ch);
- ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
- drm_rect_height(&new_state->src) >> 16);
+ ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, height);
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 277ead6a459a..22b65f4a0e30 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -19,8 +19,8 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#define IMX21LCDC_LSSAR 0x0000 /* LCDC Screen Start Address Register */
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index a53f475d33df..b440e0cdc057 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -9,6 +9,8 @@ config DRM_INGENIC
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
+ select REGMAP
+ select REGMAP_MMIO
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Ingenic SoCs.
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 5ec75e9ba499..8dbd4847d3a6 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -14,7 +14,7 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 7a43505011a5..6d236547f611 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/time.h>
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 39cab4a55f57..10fd9154cc46 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -2,7 +2,8 @@
/* Copyright 2017-2019 Qiang Yu <[email protected]> */
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
@@ -276,10 +277,7 @@ static const struct drm_driver lima_drm_driver = {
.patchlevel = 0,
.gem_create_object = lima_gem_create_object,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .gem_prime_mmap = drm_gem_prime_mmap,
};
struct lima_block_reader {
@@ -441,7 +439,7 @@ err_out0:
return err;
}
-static int lima_pdev_remove(struct platform_device *pdev)
+static void lima_pdev_remove(struct platform_device *pdev)
{
struct lima_device *ldev = platform_get_drvdata(pdev);
struct drm_device *ddev = ldev->ddev;
@@ -459,7 +457,6 @@ static int lima_pdev_remove(struct platform_device *pdev)
drm_dev_put(ddev);
lima_sched_slab_fini();
- return 0;
}
static const struct of_device_id dt_match[] = {
@@ -476,7 +473,7 @@ static const struct dev_pm_ops lima_pm_ops = {
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
- .remove = lima_pdev_remove,
+ .remove_new = lima_pdev_remove,
.driver = {
.name = "lima",
.pm = &lima_pm_ops,
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 10252dc11a22..4f9736e5f929 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
new_size = min(new_size, bo->base.base.size);
- mutex_lock(&bo->base.pages_lock);
+ dma_resv_lock(bo->base.base.resv, NULL);
if (bo->base.pages) {
pages = bo->base.pages;
@@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
if (!pages) {
- mutex_unlock(&bo->base.pages_lock);
+ dma_resv_unlock(bo->base.base.resv);
return -ENOMEM;
}
@@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
struct page *page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
- mutex_unlock(&bo->base.pages_lock);
+ dma_resv_unlock(bo->base.base.resv);
return PTR_ERR(page);
}
pages[i] = page;
}
- mutex_unlock(&bo->base.pages_lock);
+ dma_resv_unlock(bo->base.base.resv);
ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
new_size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index ff003403fbbc..ffd91a5ee299 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context)
{
- drm_sched_entity_fini(&context->base);
+ drm_sched_entity_destroy(&context->base);
}
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index c35c453fd025..749debd3d6a5 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
@@ -466,7 +466,7 @@ error_early:
return ret;
}
-static int logicvc_drm_remove(struct platform_device *pdev)
+static void logicvc_drm_remove(struct platform_device *pdev)
{
struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -480,8 +480,6 @@ static int logicvc_drm_remove(struct platform_device *pdev)
logicvc_clocks_unprepare(logicvc);
of_reserved_mem_device_release(dev);
-
- return 0;
}
static const struct of_device_id logicvc_drm_of_table[] = {
@@ -493,7 +491,7 @@ MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
static struct platform_driver logicvc_drm_platform_driver = {
.probe = logicvc_drm_probe,
- .remove = logicvc_drm_remove,
+ .remove_new = logicvc_drm_remove,
.driver = {
.name = "logicvc-drm",
.of_match_table = logicvc_drm_of_table,
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
new file mode 100644
index 000000000000..df6946d505fa
--- /dev/null
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DRM_LOONGSON
+ tristate "DRM support for Loongson Graphics"
+ depends on DRM && PCI && MMU
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ select I2C
+ select I2C_ALGOBIT
+ help
+ This is a DRM driver for Loongson Graphics; supported chips may
+ include the LS7A2000, LS7A1000, LS2K2000 and LS2K1000. The Loongson
+ LS7A series are bridge chipsets, while the LS2K series are SoCs.
+
+ If "M" is selected, the module will be called loongson.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/loongson/Makefile b/drivers/gpu/drm/loongson/Makefile
new file mode 100644
index 000000000000..91e72bd900c1
--- /dev/null
+++ b/drivers/gpu/drm/loongson/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+loongson-y := \
+ lsdc_benchmark.o \
+ lsdc_crtc.o \
+ lsdc_debugfs.o \
+ lsdc_drv.o \
+ lsdc_gem.o \
+ lsdc_gfxpll.o \
+ lsdc_i2c.o \
+ lsdc_irq.o \
+ lsdc_output_7a1000.o \
+ lsdc_output_7a2000.o \
+ lsdc_plane.o \
+ lsdc_pixpll.o \
+ lsdc_probe.o \
+ lsdc_ttm.o
+
+loongson-y += loongson_device.o \
+ loongson_module.o
+
+obj-$(CONFIG_DRM_LOONGSON) += loongson.o
diff --git a/drivers/gpu/drm/loongson/loongson_device.c b/drivers/gpu/drm/loongson/loongson_device.c
new file mode 100644
index 000000000000..9986c8a2a255
--- /dev/null
+++ b/drivers/gpu/drm/loongson/loongson_device.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pci.h>
+
+#include "lsdc_drv.h"
+
+static const struct lsdc_kms_funcs ls7a1000_kms_funcs = {
+ .create_i2c = lsdc_create_i2c_chan,
+ .irq_handler = ls7a1000_dc_irq_handler,
+ .output_init = ls7a1000_output_init,
+ .cursor_plane_init = ls7a1000_cursor_plane_init,
+ .primary_plane_init = lsdc_primary_plane_init,
+ .crtc_init = ls7a1000_crtc_init,
+};
+
+static const struct lsdc_kms_funcs ls7a2000_kms_funcs = {
+ .create_i2c = lsdc_create_i2c_chan,
+ .irq_handler = ls7a2000_dc_irq_handler,
+ .output_init = ls7a2000_output_init,
+ .cursor_plane_init = ls7a2000_cursor_plane_init,
+ .primary_plane_init = lsdc_primary_plane_init,
+ .crtc_init = ls7a2000_crtc_init,
+};
+
+static const struct loongson_gfx_desc ls7a1000_gfx = {
+ .dc = {
+ .num_of_crtc = 2,
+ .max_pixel_clk = 200000,
+ .max_width = 2048,
+ .max_height = 2048,
+ .num_of_hw_cursor = 1,
+ .hw_cursor_w = 32,
+ .hw_cursor_h = 32,
+ .pitch_align = 256,
+ .has_vblank_counter = false,
+ .funcs = &ls7a1000_kms_funcs,
+ },
+ .conf_reg_base = LS7A1000_CONF_REG_BASE,
+ .gfxpll = {
+ .reg_offset = LS7A1000_PLL_GFX_REG,
+ .reg_size = 8,
+ },
+ .pixpll = {
+ [0] = {
+ .reg_offset = LS7A1000_PIXPLL0_REG,
+ .reg_size = 8,
+ },
+ [1] = {
+ .reg_offset = LS7A1000_PIXPLL1_REG,
+ .reg_size = 8,
+ },
+ },
+ .chip_id = CHIP_LS7A1000,
+ .model = "LS7A1000 bridge chipset",
+};
+
+static const struct loongson_gfx_desc ls7a2000_gfx = {
+ .dc = {
+ .num_of_crtc = 2,
+ .max_pixel_clk = 350000,
+ .max_width = 4096,
+ .max_height = 4096,
+ .num_of_hw_cursor = 2,
+ .hw_cursor_w = 64,
+ .hw_cursor_h = 64,
+ .pitch_align = 64,
+ .has_vblank_counter = true,
+ .funcs = &ls7a2000_kms_funcs,
+ },
+ .conf_reg_base = LS7A2000_CONF_REG_BASE,
+ .gfxpll = {
+ .reg_offset = LS7A2000_PLL_GFX_REG,
+ .reg_size = 8,
+ },
+ .pixpll = {
+ [0] = {
+ .reg_offset = LS7A2000_PIXPLL0_REG,
+ .reg_size = 8,
+ },
+ [1] = {
+ .reg_offset = LS7A2000_PIXPLL1_REG,
+ .reg_size = 8,
+ },
+ },
+ .chip_id = CHIP_LS7A2000,
+ .model = "LS7A2000 bridge chipset",
+};
+
+static const struct lsdc_desc *__chip_id_desc_table[] = {
+ [CHIP_LS7A1000] = &ls7a1000_gfx.dc,
+ [CHIP_LS7A2000] = &ls7a2000_gfx.dc,
+ [CHIP_LS_LAST] = NULL,
+};
+
+const struct lsdc_desc *
+lsdc_device_probe(struct pci_dev *pdev, enum loongson_chip_id chip_id)
+{
+ return __chip_id_desc_table[chip_id];
+}
diff --git a/drivers/gpu/drm/loongson/loongson_module.c b/drivers/gpu/drm/loongson/loongson_module.c
new file mode 100644
index 000000000000..d2a51bd395f6
--- /dev/null
+++ b/drivers/gpu/drm/loongson/loongson_module.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pci.h>
+
+#include <video/nomodeset.h>
+
+#include "loongson_module.h"
+
+static int loongson_modeset = -1;
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, loongson_modeset, int, 0400);
+
+int loongson_vblank = 1;
+MODULE_PARM_DESC(vblank, "Disable/Enable hw vblank support");
+module_param_named(vblank, loongson_vblank, int, 0400);
+
+static int __init loongson_module_init(void)
+{
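+ /* Honor modeset=0 and the global nomodeset ("firmware drivers only") setting */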
+ if (!loongson_modeset || video_firmware_drivers_only())
+ return -ENODEV;
+
+ return pci_register_driver(&lsdc_pci_driver);
+}
+module_init(loongson_module_init);
+
+static void __exit loongson_module_exit(void)
+{
+ pci_unregister_driver(&lsdc_pci_driver);
+}
+module_exit(loongson_module_exit);
diff --git a/drivers/gpu/drm/loongson/loongson_module.h b/drivers/gpu/drm/loongson/loongson_module.h
new file mode 100644
index 000000000000..931c17521bf0
--- /dev/null
+++ b/drivers/gpu/drm/loongson/loongson_module.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LOONGSON_MODULE_H__
+#define __LOONGSON_MODULE_H__
+
+extern int loongson_vblank;
+extern struct pci_driver lsdc_pci_driver;
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_benchmark.c b/drivers/gpu/drm/loongson/lsdc_benchmark.c
new file mode 100644
index 000000000000..b088646a2ff9
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_benchmark.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_debugfs.h>
+
+#include "lsdc_benchmark.h"
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_ttm.h"
+
+typedef void (*lsdc_copy_proc_t)(struct lsdc_bo *src_bo,
+ struct lsdc_bo *dst_bo,
+ unsigned int size,
+ int n);
+
+static void lsdc_copy_gtt_to_vram_cpu(struct lsdc_bo *src_bo,
+ struct lsdc_bo *dst_bo,
+ unsigned int size,
+ int n)
+{
+ lsdc_bo_kmap(src_bo);
+ lsdc_bo_kmap(dst_bo);
+
+ while (n--)
+ memcpy_toio(dst_bo->kptr, src_bo->kptr, size);
+
+ lsdc_bo_kunmap(src_bo);
+ lsdc_bo_kunmap(dst_bo);
+}
+
+static void lsdc_copy_vram_to_gtt_cpu(struct lsdc_bo *src_bo,
+ struct lsdc_bo *dst_bo,
+ unsigned int size,
+ int n)
+{
+ lsdc_bo_kmap(src_bo);
+ lsdc_bo_kmap(dst_bo);
+
+ while (n--)
+ memcpy_fromio(dst_bo->kptr, src_bo->kptr, size);
+
+ lsdc_bo_kunmap(src_bo);
+ lsdc_bo_kunmap(dst_bo);
+}
+
+static void lsdc_copy_gtt_to_gtt_cpu(struct lsdc_bo *src_bo,
+ struct lsdc_bo *dst_bo,
+ unsigned int size,
+ int n)
+{
+ lsdc_bo_kmap(src_bo);
+ lsdc_bo_kmap(dst_bo);
+
+ while (n--)
+ memcpy(dst_bo->kptr, src_bo->kptr, size);
+
+ lsdc_bo_kunmap(src_bo);
+ lsdc_bo_kunmap(dst_bo);
+}
+
+static void lsdc_benchmark_copy(struct lsdc_device *ldev,
+ unsigned int size,
+ unsigned int n,
+ u32 src_domain,
+ u32 dst_domain,
+ lsdc_copy_proc_t copy_proc,
+ struct drm_printer *p)
+{
+ struct drm_device *ddev = &ldev->base;
+ struct lsdc_bo *src_bo;
+ struct lsdc_bo *dst_bo;
+ unsigned long start_jiffies;
+ unsigned long end_jiffies;
+ unsigned int throughput;
+ unsigned int time;
+
+ src_bo = lsdc_bo_create_kernel_pinned(ddev, src_domain, size);
+ dst_bo = lsdc_bo_create_kernel_pinned(ddev, dst_domain, size);
+
+ start_jiffies = jiffies;
+
+ copy_proc(src_bo, dst_bo, size, n);
+
+ end_jiffies = jiffies;
+
+ lsdc_bo_free_kernel_pinned(src_bo);
+ lsdc_bo_free_kernel_pinned(dst_bo);
+
+ time = jiffies_to_msecs(end_jiffies - start_jiffies);
+
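+ /* Total KiB copied per millisecond, which is roughly MB/s */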
+ throughput = (n * (size >> 10)) / time;
+
+ drm_printf(p,
+ "Copy bo of %uKiB %u times from %s to %s in %ums: %uMB/s\n",
+ size >> 10, n,
+ lsdc_domain_to_str(src_domain),
+ lsdc_domain_to_str(dst_domain),
+ time, throughput);
+}
+
+int lsdc_show_benchmark_copy(struct lsdc_device *ldev, struct drm_printer *p)
+{
+ unsigned int buffer_size = 1920 * 1080 * 4;
+ unsigned int iteration = 60;
+
+ lsdc_benchmark_copy(ldev,
+ buffer_size,
+ iteration,
+ LSDC_GEM_DOMAIN_GTT,
+ LSDC_GEM_DOMAIN_GTT,
+ lsdc_copy_gtt_to_gtt_cpu,
+ p);
+
+ lsdc_benchmark_copy(ldev,
+ buffer_size,
+ iteration,
+ LSDC_GEM_DOMAIN_GTT,
+ LSDC_GEM_DOMAIN_VRAM,
+ lsdc_copy_gtt_to_vram_cpu,
+ p);
+
+ lsdc_benchmark_copy(ldev,
+ buffer_size,
+ iteration,
+ LSDC_GEM_DOMAIN_VRAM,
+ LSDC_GEM_DOMAIN_GTT,
+ lsdc_copy_vram_to_gtt_cpu,
+ p);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_benchmark.h b/drivers/gpu/drm/loongson/lsdc_benchmark.h
new file mode 100644
index 000000000000..36110278237e
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_benchmark.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_BENCHMARK_H__
+#define __LSDC_BENCHMARK_H__
+
+#include "lsdc_drv.h"
+
+int lsdc_show_benchmark_copy(struct lsdc_device *ldev, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c
new file mode 100644
index 000000000000..827acab580fa
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_crtc.c
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_vblank.h>
+
+#include "lsdc_drv.h"
+
+/*
+ * After a CRTC soft reset, the vblank counter is reset to zero, but the
+ * address and other settings in the CRTC registers remain the same as
+ * before.
+ */
+
+static void lsdc_crtc0_soft_reset(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+ val &= CFG_VALID_BITS_MASK;
+
+ /* Soft reset bit, active low */
+ val &= ~CFG_RESET_N;
+
+ val &= ~CFG_PIX_FMT_MASK;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
+
+ udelay(1);
+
+ val |= CFG_RESET_N | LSDC_PF_XRGB8888 | CFG_OUTPUT_ENABLE;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
+
+ /* Wait about a vblank time */
+ mdelay(20);
+}
+
+static void lsdc_crtc1_soft_reset(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+
+ val &= CFG_VALID_BITS_MASK;
+
+ /* Soft reset bit, active low */
+ val &= ~CFG_RESET_N;
+
+ val &= ~CFG_PIX_FMT_MASK;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
+
+ udelay(1);
+
+ val |= CFG_RESET_N | LSDC_PF_XRGB8888 | CFG_OUTPUT_ENABLE;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
+
+ /* Wait about a vblank time */
+ msleep(20);
+}
+
+static void lsdc_crtc0_enable(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+ /*
+ * A stalled CRTC may happen in extremely rare cases, but a soft reset
+ * can bring it back to normal. We add a warning here, hoping to catch
+ * it if it happens.
+ */
+ if (val & CRTC_ANCHORED) {
+ drm_warn(&ldev->base, "%s stall\n", lcrtc->base.name);
+ return lsdc_crtc0_soft_reset(lcrtc);
+ }
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val | CFG_OUTPUT_ENABLE);
+}
+
+static void lsdc_crtc0_disable(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_clr(ldev, LSDC_CRTC0_CFG_REG, CFG_OUTPUT_ENABLE);
+
+ udelay(9);
+}
+
+static void lsdc_crtc1_enable(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val;
+
+ /*
+ * A stalled CRTC may happen in extremely rare cases, but a soft reset
+ * can bring it back to normal. We add a warning here, hoping to catch
+ * it if it happens.
+ */
+ val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+ if (val & CRTC_ANCHORED) {
+ drm_warn(&ldev->base, "%s stall\n", lcrtc->base.name);
+ return lsdc_crtc1_soft_reset(lcrtc);
+ }
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val | CFG_OUTPUT_ENABLE);
+}
+
+static void lsdc_crtc1_disable(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_clr(ldev, LSDC_CRTC1_CFG_REG, CFG_OUTPUT_ENABLE);
+
+ udelay(9);
+}
+
+/* All Loongson display controllers have hardware scanout position recorders */
+
+static void lsdc_crtc0_scan_pos(struct lsdc_crtc *lcrtc, int *hpos, int *vpos)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_CRTC0_SCAN_POS_REG);
+
+ *hpos = val >> 16;
+ *vpos = val & 0xffff;
+}
+
+static void lsdc_crtc1_scan_pos(struct lsdc_crtc *lcrtc, int *hpos, int *vpos)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_CRTC1_SCAN_POS_REG);
+
+ *hpos = val >> 16;
+ *vpos = val & 0xffff;
+}
+
+static void lsdc_crtc0_enable_vblank(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_set(ldev, LSDC_INT_REG, INT_CRTC0_VSYNC_EN);
+}
+
+static void lsdc_crtc0_disable_vblank(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_clr(ldev, LSDC_INT_REG, INT_CRTC0_VSYNC_EN);
+}
+
+static void lsdc_crtc1_enable_vblank(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_set(ldev, LSDC_INT_REG, INT_CRTC1_VSYNC_EN);
+}
+
+static void lsdc_crtc1_disable_vblank(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_clr(ldev, LSDC_INT_REG, INT_CRTC1_VSYNC_EN);
+}
+
+static void lsdc_crtc0_flip(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_set(ldev, LSDC_CRTC0_CFG_REG, CFG_PAGE_FLIP);
+}
+
+static void lsdc_crtc1_flip(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_set(ldev, LSDC_CRTC1_CFG_REG, CFG_PAGE_FLIP);
+}
+
+/*
+ * CRTC0 can clone from CRTC1, or CRTC1 from CRTC0, using hardware logic.
+ * This may be useful for custom cloning (TWIN) applications, saving
+ * bandwidth compared with the clone (mirroring) display mode provided by
+ * the DRM core.
+ */
+
+static void lsdc_crtc0_clone(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_set(ldev, LSDC_CRTC0_CFG_REG, CFG_HW_CLONE);
+}
+
+static void lsdc_crtc1_clone(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_ureg32_set(ldev, LSDC_CRTC1_CFG_REG, CFG_HW_CLONE);
+}
+
+static void lsdc_crtc0_set_mode(struct lsdc_crtc *lcrtc,
+ const struct drm_display_mode *mode)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_HDISPLAY_REG,
+ (mode->crtc_htotal << 16) | mode->crtc_hdisplay);
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_VDISPLAY_REG,
+ (mode->crtc_vtotal << 16) | mode->crtc_vdisplay);
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_HSYNC_REG,
+ (mode->crtc_hsync_end << 16) | mode->crtc_hsync_start | HSYNC_EN);
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_VSYNC_REG,
+ (mode->crtc_vsync_end << 16) | mode->crtc_vsync_start | VSYNC_EN);
+}
+
+static void lsdc_crtc1_set_mode(struct lsdc_crtc *lcrtc,
+ const struct drm_display_mode *mode)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_HDISPLAY_REG,
+ (mode->crtc_htotal << 16) | mode->crtc_hdisplay);
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_VDISPLAY_REG,
+ (mode->crtc_vtotal << 16) | mode->crtc_vdisplay);
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_HSYNC_REG,
+ (mode->crtc_hsync_end << 16) | mode->crtc_hsync_start | HSYNC_EN);
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_VSYNC_REG,
+ (mode->crtc_vsync_end << 16) | mode->crtc_vsync_start | VSYNC_EN);
+}
+
+/*
+ * This is required for S3 support.
+ * After resuming from suspend, LSDC_CRTCx_CFG_REG (x = 0 or 1) is filled
+ * with a garbage value, which causes the CRTC to hang.
+ *
+ * This function provides minimal settings for the affected registers.
+ * It overrides the firmware's settings on startup, making the CRTC work
+ * on its own, similar in function to a GPU POST (Power On Self Test).
+ * Only the CRTC hardware-related parts are touched.
+ */
+
+static void lsdc_crtc0_reset(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, CFG_RESET_N | LSDC_PF_XRGB8888);
+}
+
+static void lsdc_crtc1_reset(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, CFG_RESET_N | LSDC_PF_XRGB8888);
+}
+
+static const struct lsdc_crtc_hw_ops ls7a1000_crtc_hw_ops[2] = {
+ {
+ .enable = lsdc_crtc0_enable,
+ .disable = lsdc_crtc0_disable,
+ .enable_vblank = lsdc_crtc0_enable_vblank,
+ .disable_vblank = lsdc_crtc0_disable_vblank,
+ .flip = lsdc_crtc0_flip,
+ .clone = lsdc_crtc0_clone,
+ .set_mode = lsdc_crtc0_set_mode,
+ .get_scan_pos = lsdc_crtc0_scan_pos,
+ .soft_reset = lsdc_crtc0_soft_reset,
+ .reset = lsdc_crtc0_reset,
+ },
+ {
+ .enable = lsdc_crtc1_enable,
+ .disable = lsdc_crtc1_disable,
+ .enable_vblank = lsdc_crtc1_enable_vblank,
+ .disable_vblank = lsdc_crtc1_disable_vblank,
+ .flip = lsdc_crtc1_flip,
+ .clone = lsdc_crtc1_clone,
+ .set_mode = lsdc_crtc1_set_mode,
+ .get_scan_pos = lsdc_crtc1_scan_pos,
+ .soft_reset = lsdc_crtc1_soft_reset,
+ .reset = lsdc_crtc1_reset,
+ },
+};
+
+/*
+ * The 32-bit hardware vblank counter has been available since LS7A2000
+ * and LS2K2000. The counter increases even when the CRTC is disabled;
+ * it is reset only when the CRTC is soft reset.
+ * These registers are also readable on the LS7A1000, but their values
+ * do not change.
+ */
+
+static u32 lsdc_crtc0_get_vblank_count(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ return lsdc_rreg32(ldev, LSDC_CRTC0_VSYNC_COUNTER_REG);
+}
+
+static u32 lsdc_crtc1_get_vblank_count(struct lsdc_crtc *lcrtc)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+
+ return lsdc_rreg32(ldev, LSDC_CRTC1_VSYNC_COUNTER_REG);
+}
+
+/*
+ * The DMA step bit fields have been available since LS7A2000/LS2K2000,
+ * to support odd resolutions. A larger DMA step saves bandwidth, so the
+ * larger the better. The behavior of writing those bits on LS7A1000
+ * or LS2K1000 is undefined.
+ */
+
+static void lsdc_crtc0_set_dma_step(struct lsdc_crtc *lcrtc,
+ enum lsdc_dma_steps dma_step)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+ val &= ~CFG_DMA_STEP_MASK;
+ val |= dma_step << CFG_DMA_STEP_SHIFT;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, val);
+}
+
+static void lsdc_crtc1_set_dma_step(struct lsdc_crtc *lcrtc,
+ enum lsdc_dma_steps dma_step)
+{
+ struct lsdc_device *ldev = lcrtc->ldev;
+ u32 val = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+
+ val &= ~CFG_DMA_STEP_MASK;
+ val |= dma_step << CFG_DMA_STEP_SHIFT;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, val);
+}
+
+static const struct lsdc_crtc_hw_ops ls7a2000_crtc_hw_ops[2] = {
+ {
+ .enable = lsdc_crtc0_enable,
+ .disable = lsdc_crtc0_disable,
+ .enable_vblank = lsdc_crtc0_enable_vblank,
+ .disable_vblank = lsdc_crtc0_disable_vblank,
+ .flip = lsdc_crtc0_flip,
+ .clone = lsdc_crtc0_clone,
+ .set_mode = lsdc_crtc0_set_mode,
+ .soft_reset = lsdc_crtc0_soft_reset,
+ .get_scan_pos = lsdc_crtc0_scan_pos,
+ .set_dma_step = lsdc_crtc0_set_dma_step,
+ .get_vblank_counter = lsdc_crtc0_get_vblank_count,
+ .reset = lsdc_crtc0_reset,
+ },
+ {
+ .enable = lsdc_crtc1_enable,
+ .disable = lsdc_crtc1_disable,
+ .enable_vblank = lsdc_crtc1_enable_vblank,
+ .disable_vblank = lsdc_crtc1_disable_vblank,
+ .flip = lsdc_crtc1_flip,
+ .clone = lsdc_crtc1_clone,
+ .set_mode = lsdc_crtc1_set_mode,
+ .get_scan_pos = lsdc_crtc1_scan_pos,
+ .soft_reset = lsdc_crtc1_soft_reset,
+ .set_dma_step = lsdc_crtc1_set_dma_step,
+ .get_vblank_counter = lsdc_crtc1_get_vblank_count,
+ .reset = lsdc_crtc1_reset,
+ },
+};
+
+static void lsdc_crtc_reset(struct drm_crtc *crtc)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
+ struct lsdc_crtc_state *priv_crtc_state;
+
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ priv_crtc_state = kzalloc(sizeof(*priv_crtc_state), GFP_KERNEL);
+
+ if (!priv_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, &priv_crtc_state->base);
+
+ /* Reset the CRTC hardware, this is required for S3 support */
+ ops->reset(lcrtc);
+}
+
+static void lsdc_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&priv_state->base);
+
+ kfree(priv_state);
+}
+
+static struct drm_crtc_state *
+lsdc_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct lsdc_crtc_state *new_priv_state;
+ struct lsdc_crtc_state *old_priv_state;
+
+ new_priv_state = kzalloc(sizeof(*new_priv_state), GFP_KERNEL);
+ if (!new_priv_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_priv_state->base);
+
+ old_priv_state = to_lsdc_crtc_state(crtc->state);
+
+ memcpy(&new_priv_state->pparms, &old_priv_state->pparms,
+ sizeof(new_priv_state->pparms));
+
+ return &new_priv_state->base;
+}
+
+static u32 lsdc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+ /* 32-bit hardware vblank counter */
+ return lcrtc->hw_ops->get_vblank_counter(lcrtc);
+}
+
+static int lsdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+ if (!lcrtc->has_vblank)
+ return -EINVAL;
+
+ lcrtc->hw_ops->enable_vblank(lcrtc);
+
+ return 0;
+}
+
+static void lsdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+ if (!lcrtc->has_vblank)
+ return;
+
+ lcrtc->hw_ops->disable_vblank(lcrtc);
+}
+
+/*
+ * CRTC related debugfs
+ * Primary planes and cursor planes belong to the CRTC as well.
+ * For the sake of convenience, plane-related registers are also added here.
+ */
+
+#define REG_DEF(reg) { \
+ .name = __stringify_1(LSDC_##reg##_REG), \
+ .offset = LSDC_##reg##_REG, \
+}
+
+static const struct lsdc_reg32 lsdc_crtc_regs_array[2][21] = {
+ [0] = {
+ REG_DEF(CRTC0_CFG),
+ REG_DEF(CRTC0_FB_ORIGIN),
+ REG_DEF(CRTC0_DVO_CONF),
+ REG_DEF(CRTC0_HDISPLAY),
+ REG_DEF(CRTC0_HSYNC),
+ REG_DEF(CRTC0_VDISPLAY),
+ REG_DEF(CRTC0_VSYNC),
+ REG_DEF(CRTC0_GAMMA_INDEX),
+ REG_DEF(CRTC0_GAMMA_DATA),
+ REG_DEF(CRTC0_SYNC_DEVIATION),
+ REG_DEF(CRTC0_VSYNC_COUNTER),
+ REG_DEF(CRTC0_SCAN_POS),
+ REG_DEF(CRTC0_STRIDE),
+ REG_DEF(CRTC0_FB1_ADDR_HI),
+ REG_DEF(CRTC0_FB1_ADDR_LO),
+ REG_DEF(CRTC0_FB0_ADDR_HI),
+ REG_DEF(CRTC0_FB0_ADDR_LO),
+ REG_DEF(CURSOR0_CFG),
+ REG_DEF(CURSOR0_POSITION),
+ REG_DEF(CURSOR0_BG_COLOR),
+ REG_DEF(CURSOR0_FG_COLOR),
+ },
+ [1] = {
+ REG_DEF(CRTC1_CFG),
+ REG_DEF(CRTC1_FB_ORIGIN),
+ REG_DEF(CRTC1_DVO_CONF),
+ REG_DEF(CRTC1_HDISPLAY),
+ REG_DEF(CRTC1_HSYNC),
+ REG_DEF(CRTC1_VDISPLAY),
+ REG_DEF(CRTC1_VSYNC),
+ REG_DEF(CRTC1_GAMMA_INDEX),
+ REG_DEF(CRTC1_GAMMA_DATA),
+ REG_DEF(CRTC1_SYNC_DEVIATION),
+ REG_DEF(CRTC1_VSYNC_COUNTER),
+ REG_DEF(CRTC1_SCAN_POS),
+ REG_DEF(CRTC1_STRIDE),
+ REG_DEF(CRTC1_FB1_ADDR_HI),
+ REG_DEF(CRTC1_FB1_ADDR_LO),
+ REG_DEF(CRTC1_FB0_ADDR_HI),
+ REG_DEF(CRTC1_FB0_ADDR_LO),
+ REG_DEF(CURSOR1_CFG),
+ REG_DEF(CURSOR1_POSITION),
+ REG_DEF(CURSOR1_BG_COLOR),
+ REG_DEF(CURSOR1_FG_COLOR),
+ },
+};
+
+static int lsdc_crtc_show_regs(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+ struct lsdc_device *ldev = lcrtc->ldev;
+ unsigned int i;
+
+ for (i = 0; i < lcrtc->nreg; i++) {
+ const struct lsdc_reg32 *preg = &lcrtc->preg[i];
+ u32 offset = preg->offset;
+
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ preg->name, offset, lsdc_rreg32(ldev, offset));
+ }
+
+ return 0;
+}
+
+static int lsdc_crtc_show_scan_position(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+ int x, y;
+
+ lcrtc->hw_ops->get_scan_pos(lcrtc, &x, &y);
+ seq_printf(m, "Scanout position: x: %08u, y: %08u\n", x, y);
+
+ return 0;
+}
+
+static int lsdc_crtc_show_vblank_counter(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+
+ if (lcrtc->hw_ops->get_vblank_counter)
+ seq_printf(m, "%s vblank counter: %08u\n\n", lcrtc->base.name,
+ lcrtc->hw_ops->get_vblank_counter(lcrtc));
+
+ return 0;
+}
+
+static int lsdc_pixpll_show_clock(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_crtc *lcrtc = (struct lsdc_crtc *)node->info_ent->data;
+ struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
+ const struct lsdc_pixpll_funcs *funcs = pixpll->funcs;
+ struct drm_crtc *crtc = &lcrtc->base;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_printer printer = drm_seq_file_printer(m);
+ unsigned int out_khz;
+
+ out_khz = funcs->get_rate(pixpll);
+
+ seq_printf(m, "%s: %dx%d@%d\n", crtc->name,
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode));
+
+ seq_printf(m, "Pixel clock required: %d kHz\n", mode->clock);
+ seq_printf(m, "Actual frequency output: %u kHz\n", out_khz);
+ seq_printf(m, "Diff: %d kHz\n", out_khz - mode->clock);
+
+ funcs->print(pixpll, &printer);
+
+ return 0;
+}
+
+static struct drm_info_list lsdc_crtc_debugfs_list[2][4] = {
+ [0] = {
+ { "regs", lsdc_crtc_show_regs, 0, NULL },
+ { "pixclk", lsdc_pixpll_show_clock, 0, NULL },
+ { "scanpos", lsdc_crtc_show_scan_position, 0, NULL },
+ { "vblanks", lsdc_crtc_show_vblank_counter, 0, NULL },
+ },
+ [1] = {
+ { "regs", lsdc_crtc_show_regs, 0, NULL },
+ { "pixclk", lsdc_pixpll_show_clock, 0, NULL },
+ { "scanpos", lsdc_crtc_show_scan_position, 0, NULL },
+ { "vblanks", lsdc_crtc_show_vblank_counter, 0, NULL },
+ },
+};
+
+/* operate manually */
+
+static int lsdc_crtc_man_op_show(struct seq_file *m, void *data)
+{
+ seq_puts(m, "soft_reset: soft reset this CRTC\n");
+ seq_puts(m, "enable: enable this CRTC\n");
+ seq_puts(m, "disable: disable this CRTC\n");
+ seq_puts(m, "flip: trigger the page flip\n");
+ seq_puts(m, "clone: clone the another crtc with hardware logic\n");
+
+ return 0;
+}
+
+static int lsdc_crtc_man_op_open(struct inode *inode, struct file *file)
+{
+ struct drm_crtc *crtc = inode->i_private;
+
+ return single_open(file, lsdc_crtc_man_op_show, crtc);
+}
+
+static ssize_t lsdc_crtc_man_op_write(struct file *file,
+ const char __user *ubuf,
+ size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct lsdc_crtc *lcrtc = m->private;
+ const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
+ char buf[16];
+
+ if (len > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (sysfs_streq(buf, "soft_reset"))
+ ops->soft_reset(lcrtc);
+ else if (sysfs_streq(buf, "enable"))
+ ops->enable(lcrtc);
+ else if (sysfs_streq(buf, "disable"))
+ ops->disable(lcrtc);
+ else if (sysfs_streq(buf, "flip"))
+ ops->flip(lcrtc);
+ else if (sysfs_streq(buf, "clone"))
+ ops->clone(lcrtc);
+
+ return len;
+}
+
+static const struct file_operations lsdc_crtc_man_op_fops = {
+ .owner = THIS_MODULE,
+ .open = lsdc_crtc_man_op_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = lsdc_crtc_man_op_write,
+};
+
+static int lsdc_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct lsdc_display_pipe *dispipe = crtc_to_display_pipe(crtc);
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ struct drm_minor *minor = crtc->dev->primary;
+ unsigned int index = dispipe->index;
+ unsigned int i;
+
+ lcrtc->preg = lsdc_crtc_regs_array[index];
+ lcrtc->nreg = ARRAY_SIZE(lsdc_crtc_regs_array[index]);
+ lcrtc->p_info_list = lsdc_crtc_debugfs_list[index];
+ lcrtc->n_info_list = ARRAY_SIZE(lsdc_crtc_debugfs_list[index]);
+
+ for (i = 0; i < lcrtc->n_info_list; ++i)
+ lcrtc->p_info_list[i].data = lcrtc;
+
+ drm_debugfs_create_files(lcrtc->p_info_list, lcrtc->n_info_list,
+ crtc->debugfs_entry, minor);
+
+ /* Manual operations supported */
+ debugfs_create_file("ops", 0644, crtc->debugfs_entry, lcrtc,
+ &lsdc_crtc_man_op_fops);
+
+ return 0;
+}
+
+static void lsdc_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ const struct lsdc_crtc_state *priv_state;
+ const struct lsdc_pixpll_parms *pparms;
+
+ priv_state = container_of_const(state, struct lsdc_crtc_state, base);
+ pparms = &priv_state->pparms;
+
+ drm_printf(p, "\tInput clock divider = %u\n", pparms->div_ref);
+ drm_printf(p, "\tMedium clock multiplier = %u\n", pparms->loopc);
+ drm_printf(p, "\tOutput clock divider = %u\n", pparms->div_out);
+}
+
+static const struct drm_crtc_funcs ls7a1000_crtc_funcs = {
+ .reset = lsdc_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = lsdc_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = lsdc_crtc_atomic_destroy_state,
+ .late_register = lsdc_crtc_late_register,
+ .enable_vblank = lsdc_crtc_enable_vblank,
+ .disable_vblank = lsdc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .atomic_print_state = lsdc_crtc_atomic_print_state,
+};
+
+static const struct drm_crtc_funcs ls7a2000_crtc_funcs = {
+ .reset = lsdc_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = lsdc_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = lsdc_crtc_atomic_destroy_state,
+ .late_register = lsdc_crtc_late_register,
+ .get_vblank_counter = lsdc_crtc_get_vblank_counter,
+ .enable_vblank = lsdc_crtc_enable_vblank,
+ .disable_vblank = lsdc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .atomic_print_state = lsdc_crtc_atomic_print_state,
+};
+
+static enum drm_mode_status
+lsdc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ const struct lsdc_desc *descp = ldev->descp;
+ unsigned int pitch;
+
+ if (mode->hdisplay > descp->max_width)
+ return MODE_BAD_HVALUE;
+
+ if (mode->vdisplay > descp->max_height)
+ return MODE_BAD_VVALUE;
+
+ if (mode->clock > descp->max_pixel_clk) {
+ drm_dbg_kms(ddev, "mode %dx%d, pixel clock=%d is too high\n",
+ mode->hdisplay, mode->vdisplay, mode->clock);
+ return MODE_CLOCK_HIGH;
+ }
+
+ /* 4 for DRM_FORMAT_XRGB8888 */
+ pitch = mode->hdisplay * 4;
+
+ if (pitch % descp->pitch_align) {
+ drm_dbg_kms(ddev, "align to %u bytes is required: %u\n",
+ descp->pitch_align, pitch);
+ return MODE_BAD_WIDTH;
+ }
+
+ return MODE_OK;
+}
+
+static int lsdc_pixpll_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
+ const struct lsdc_pixpll_funcs *pfuncs = pixpll->funcs;
+ struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
+ unsigned int clock = state->mode.clock;
+ int ret;
+
+ ret = pfuncs->compute(pixpll, clock, &priv_state->pparms);
+ if (ret) {
+ drm_warn(crtc->dev, "Failed to find PLL params for %ukHz\n",
+ clock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lsdc_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ return lsdc_pixpll_atomic_check(crtc, crtc_state);
+}
+
+static void lsdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ const struct lsdc_crtc_hw_ops *crtc_hw_ops = lcrtc->hw_ops;
+ struct lsdc_pixpll *pixpll = &lcrtc->pixpll;
+ const struct lsdc_pixpll_funcs *pixpll_funcs = pixpll->funcs;
+ struct drm_crtc_state *state = crtc->state;
+ struct drm_display_mode *mode = &state->mode;
+ struct lsdc_crtc_state *priv_state = to_lsdc_crtc_state(state);
+
+ pixpll_funcs->update(pixpll, &priv_state->pparms);
+
+ if (crtc_hw_ops->set_dma_step) {
+ unsigned int width_in_bytes = mode->hdisplay * 4;
+ enum lsdc_dma_steps dma_step;
+
+ /*
+ * Use as large a DMA step as possible to improve
+ * hardware DMA efficiency.
+ */
+ if (width_in_bytes % 256 == 0)
+ dma_step = LSDC_DMA_STEP_256_BYTES;
+ else if (width_in_bytes % 128 == 0)
+ dma_step = LSDC_DMA_STEP_128_BYTES;
+ else if (width_in_bytes % 64 == 0)
+ dma_step = LSDC_DMA_STEP_64_BYTES;
+ else /* width_in_bytes % 32 == 0 */
+ dma_step = LSDC_DMA_STEP_32_BYTES;
+
+ crtc_hw_ops->set_dma_step(lcrtc, dma_step);
+ }
+
+ crtc_hw_ops->set_mode(lcrtc, mode);
+}
+
+static void lsdc_crtc_send_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ unsigned long flags;
+
+ if (!crtc->state || !crtc->state->event)
+ return;
+
+ drm_dbg(ddev, "Send vblank manually\n");
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void lsdc_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+ if (lcrtc->has_vblank)
+ drm_crtc_vblank_on(crtc);
+
+ lcrtc->hw_ops->enable(lcrtc);
+}
+
+static void lsdc_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+
+ if (lcrtc->has_vblank)
+ drm_crtc_vblank_off(crtc);
+
+ lcrtc->hw_ops->disable(lcrtc);
+
+ /*
+ * Make sure we issue a vblank event after disabling the CRTC if
+ * someone was waiting for it.
+ */
+ lsdc_crtc_send_vblank(crtc);
+}
+
+static void lsdc_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool lsdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos,
+ int *hpos,
+ ktime_t *stime,
+ ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ const struct lsdc_crtc_hw_ops *ops = lcrtc->hw_ops;
+ int vsw, vbp, vactive_start, vactive_end, vfp_end;
+ int x, y;
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ vactive_start = vsw + vbp + 1;
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ ops->get_scan_pos(lcrtc, &x, &y);
+
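+ /*
+ * The raw scan position is counted from the start of vsync; convert it
+ * to a position relative to the active display area. The result is
+ * negative while inside the vertical blanking region.
+ */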
+ if (y > vactive_end)
+ y = y - vfp_end - vactive_start;
+ else
+ y -= vactive_start;
+
+ *vpos = y;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
+static const struct drm_crtc_helper_funcs lsdc_crtc_helper_funcs = {
+ .mode_valid = lsdc_crtc_mode_valid,
+ .mode_set_nofb = lsdc_crtc_mode_set_nofb,
+ .atomic_enable = lsdc_crtc_atomic_enable,
+ .atomic_disable = lsdc_crtc_atomic_disable,
+ .atomic_check = lsdc_crtc_helper_atomic_check,
+ .atomic_flush = lsdc_crtc_atomic_flush,
+ .get_scanout_position = lsdc_crtc_get_scanout_position,
+};
+
+int ls7a1000_crtc_init(struct drm_device *ddev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ unsigned int index,
+ bool has_vblank)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ int ret;
+
+ ret = lsdc_pixpll_init(&lcrtc->pixpll, ddev, index);
+ if (ret) {
+ drm_err(ddev, "pixel pll init failed: %d\n", ret);
+ return ret;
+ }
+
+ lcrtc->ldev = to_lsdc(ddev);
+ lcrtc->has_vblank = has_vblank;
+ lcrtc->hw_ops = &ls7a1000_crtc_hw_ops[index];
+
+ ret = drm_crtc_init_with_planes(ddev, crtc, primary, cursor,
+ &ls7a1000_crtc_funcs,
+ "LS-CRTC-%d", index);
+ if (ret) {
+ drm_err(ddev, "crtc init with planes failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &lsdc_crtc_helper_funcs);
+
+ ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+ if (ret)
+ return ret;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+
+ return 0;
+}
+
+int ls7a2000_crtc_init(struct drm_device *ddev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ unsigned int index,
+ bool has_vblank)
+{
+ struct lsdc_crtc *lcrtc = to_lsdc_crtc(crtc);
+ int ret;
+
+ ret = lsdc_pixpll_init(&lcrtc->pixpll, ddev, index);
+ if (ret) {
+ drm_err(ddev, "crtc init with pll failed: %d\n", ret);
+ return ret;
+ }
+
+ lcrtc->ldev = to_lsdc(ddev);
+ lcrtc->has_vblank = has_vblank;
+ lcrtc->hw_ops = &ls7a2000_crtc_hw_ops[index];
+
+ ret = drm_crtc_init_with_planes(ddev, crtc, primary, cursor,
+ &ls7a2000_crtc_funcs,
+ "LS-CRTC-%u", index);
+ if (ret) {
+ drm_err(ddev, "crtc init with planes failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &lsdc_crtc_helper_funcs);
+
+ ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+ if (ret)
+ return ret;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_debugfs.c b/drivers/gpu/drm/loongson/lsdc_debugfs.c
new file mode 100644
index 000000000000..b9c2e6b1701f
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_debugfs.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_debugfs.h>
+
+#include "lsdc_benchmark.h"
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_probe.h"
+#include "lsdc_ttm.h"
+
+/* device level debugfs */
+
+static int lsdc_identify(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+ const struct loongson_gfx_desc *gfx = to_loongson_gfx(ldev->descp);
+ u8 impl, rev;
+
+ loongson_cpu_get_prid(&impl, &rev);
+
+ seq_printf(m, "Running on cpu 0x%x, cpu revision: 0x%x\n",
+ impl, rev);
+
+ seq_printf(m, "Contained in: %s\n", gfx->model);
+
+ return 0;
+}
+
+static int lsdc_show_mm(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_mm_print(&ddev->vma_offset_manager->vm_addr_space_mm, &p);
+
+ return 0;
+}
+
+static int lsdc_show_gfxpll_clock(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+ struct drm_printer printer = drm_seq_file_printer(m);
+ struct loongson_gfxpll *gfxpll = ldev->gfxpll;
+
+ gfxpll->funcs->print(gfxpll, &printer, true);
+
+ return 0;
+}
+
+static int lsdc_show_benchmark(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+ struct drm_printer printer = drm_seq_file_printer(m);
+
+ lsdc_show_benchmark_copy(ldev, &printer);
+
+ return 0;
+}
+
+static int lsdc_pdev_enable_io_mem(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct lsdc_device *ldev = (struct lsdc_device *)node->info_ent->data;
+ u16 cmd;
+
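+ /* Dump PCI_COMMAND, then re-enable memory and I/O space decoding */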
+ pci_read_config_word(ldev->dc, PCI_COMMAND, &cmd);
+
+ seq_printf(m, "PCI_COMMAND: 0x%x\n", cmd);
+
+ cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+
+ pci_write_config_word(ldev->dc, PCI_COMMAND, cmd);
+
+ pci_read_config_word(ldev->dc, PCI_COMMAND, &cmd);
+
+ seq_printf(m, "PCI_COMMAND: 0x%x\n", cmd);
+
+ return 0;
+}
+
+static struct drm_info_list lsdc_debugfs_list[] = {
+ { "benchmark", lsdc_show_benchmark, 0, NULL },
+ { "bos", lsdc_show_buffer_object, 0, NULL },
+ { "chips", lsdc_identify, 0, NULL },
+ { "clocks", lsdc_show_gfxpll_clock, 0, NULL },
+ { "dc_enable", lsdc_pdev_enable_io_mem, 0, NULL },
+ { "mm", lsdc_show_mm, 0, NULL },
+};
+
+void lsdc_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *ddev = minor->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ unsigned int n = ARRAY_SIZE(lsdc_debugfs_list);
+ unsigned int i;
+
+ for (i = 0; i < n; ++i)
+ lsdc_debugfs_list[i].data = ldev;
+
+ drm_debugfs_create_files(lsdc_debugfs_list, n, minor->debugfs_root, minor);
+
+ lsdc_ttm_debugfs_init(ldev);
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
new file mode 100644
index 000000000000..188ec82afcfb
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "loongson_module.h"
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_ttm.h"
+
+#define DRIVER_AUTHOR "Sui Jingfeng <[email protected]>"
+#define DRIVER_NAME "loongson"
+#define DRIVER_DESC "drm driver for loongson graphics"
+#define DRIVER_DATE "20220701"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+DEFINE_DRM_GEM_FOPS(lsdc_gem_fops);
+
+static const struct drm_driver lsdc_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_RENDER | DRIVER_GEM | DRIVER_ATOMIC,
+ .fops = &lsdc_gem_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .debugfs_init = lsdc_debugfs_init,
+ .dumb_create = lsdc_dumb_create,
+ .dumb_map_offset = lsdc_dumb_map_offset,
+ .gem_prime_import_sg_table = lsdc_prime_import_sg_table,
+};
+
+static const struct drm_mode_config_funcs lsdc_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+/* Display related */
+
+static int lsdc_modeset_init(struct lsdc_device *ldev,
+ unsigned int num_crtc,
+ const struct lsdc_kms_funcs *funcs,
+ bool has_vblank)
+{
+ struct drm_device *ddev = &ldev->base;
+ struct lsdc_display_pipe *dispipe;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_crtc; i++) {
+ dispipe = &ldev->dispipe[i];
+
+ /* We need an index before crtc is initialized */
+ dispipe->index = i;
+
+ ret = funcs->create_i2c(ddev, dispipe, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_crtc; i++) {
+ struct i2c_adapter *ddc = NULL;
+
+ dispipe = &ldev->dispipe[i];
+ if (dispipe->li2c)
+ ddc = &dispipe->li2c->adapter;
+
+ ret = funcs->output_init(ddev, dispipe, ddc, i);
+ if (ret)
+ return ret;
+
+ ldev->num_output++;
+ }
+
+ for (i = 0; i < num_crtc; i++) {
+ dispipe = &ldev->dispipe[i];
+
+ ret = funcs->primary_plane_init(ddev, &dispipe->primary.base, i);
+ if (ret)
+ return ret;
+
+ ret = funcs->cursor_plane_init(ddev, &dispipe->cursor.base, i);
+ if (ret)
+ return ret;
+
+ ret = funcs->crtc_init(ddev, &dispipe->crtc.base,
+ &dispipe->primary.base,
+ &dispipe->cursor.base,
+ i, has_vblank);
+ if (ret)
+ return ret;
+ }
+
+ drm_info(ddev, "Total %u outputs\n", ldev->num_output);
+
+ return 0;
+}
+
+static const struct drm_mode_config_helper_funcs lsdc_mode_config_helper_funcs = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail,
+};
+
+static int lsdc_mode_config_init(struct drm_device *ddev,
+ const struct lsdc_desc *descp)
+{
+ int ret;
+
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
+
+ ddev->mode_config.funcs = &lsdc_mode_config_funcs;
+ ddev->mode_config.min_width = 1;
+ ddev->mode_config.min_height = 1;
+ ddev->mode_config.max_width = descp->max_width * LSDC_NUM_CRTC;
+ ddev->mode_config.max_height = descp->max_height * LSDC_NUM_CRTC;
+ ddev->mode_config.preferred_depth = 24;
+ ddev->mode_config.prefer_shadow = 1;
+
+ ddev->mode_config.cursor_width = descp->hw_cursor_h;
+ ddev->mode_config.cursor_height = descp->hw_cursor_h;
+
+ ddev->mode_config.helper_private = &lsdc_mode_config_helper_funcs;
+
+ if (descp->has_vblank_counter)
+ ddev->max_vblank_count = 0xffffffff;
+
+ return ret;
+}
+
+/*
+ * The GPU and the display controller in the LS7A1000/LS7A2000/LS2K2000 are
+ * separate PCIe devices: two devices, not one. BAR 2 of the GPU device
+ * contains the base address and size of the VRAM; both the GPU and the DC
+ * can access the on-board VRAM.
+ */
+static int lsdc_get_dedicated_vram(struct lsdc_device *ldev,
+ struct pci_dev *pdev_dc,
+ const struct lsdc_desc *descp)
+{
+ struct drm_device *ddev = &ldev->base;
+ struct pci_dev *pdev_gpu;
+ resource_size_t base, size;
+
+ /*
+ * The GPU has 00:06.0 as its BDF, while the DC has 00:06.1
+ * This is true for the LS7A1000, LS7A2000 and LS2K2000.
+ */
+ pdev_gpu = pci_get_domain_bus_and_slot(pci_domain_nr(pdev_dc->bus),
+ pdev_dc->bus->number,
+ PCI_DEVFN(6, 0));
+ if (!pdev_gpu) {
+ drm_err(ddev, "No GPU device, then no VRAM\n");
+ return -ENODEV;
+ }
+
+ base = pci_resource_start(pdev_gpu, 2);
+ size = pci_resource_len(pdev_gpu, 2);
+
+ ldev->vram_base = base;
+ ldev->vram_size = size;
+ ldev->gpu = pdev_gpu;
+
+ drm_info(ddev, "Dedicated vram start: 0x%llx, size: %uMiB\n",
+ (u64)base, (u32)(size >> 20));
+
+ return 0;
+}
+
+static struct lsdc_device *
+lsdc_create_device(struct pci_dev *pdev,
+ const struct lsdc_desc *descp,
+ const struct drm_driver *driver)
+{
+ struct lsdc_device *ldev;
+ struct drm_device *ddev;
+ int ret;
+
+ ldev = devm_drm_dev_alloc(&pdev->dev, driver, struct lsdc_device, base);
+ if (IS_ERR(ldev))
+ return ldev;
+
+ ldev->dc = pdev;
+ ldev->descp = descp;
+
+ ddev = &ldev->base;
+
+ loongson_gfxpll_create(ddev, &ldev->gfxpll);
+
+ ret = lsdc_get_dedicated_vram(ldev, pdev, descp);
+ if (ret) {
+ drm_err(ddev, "Init VRAM failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base,
+ ldev->vram_size,
+ driver);
+ if (ret) {
+ drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = lsdc_ttm_init(ldev);
+ if (ret) {
+ drm_err(ddev, "Memory manager init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ lsdc_gem_init(ddev);
+
+ /* BAR 0 of the DC device contains the base address of the MMIO registers */
+ ldev->reg_base = pcim_iomap(pdev, 0, 0);
+ if (!ldev->reg_base)
+ return ERR_PTR(-ENODEV);
+
+ spin_lock_init(&ldev->reglock);
+
+ ret = lsdc_mode_config_init(ddev, descp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = lsdc_modeset_init(ldev, descp->num_of_crtc, descp->funcs,
+ loongson_vblank);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(ddev);
+
+ return ldev;
+}
+
+/* For multiple GPU driver instances co-existing in the system */
+
+static unsigned int lsdc_vga_set_decode(struct pci_dev *pdev, bool state)
+{
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct lsdc_desc *descp;
+ struct drm_device *ddev;
+ struct lsdc_device *ldev;
+ int ret;
+
+ descp = lsdc_device_probe(pdev, ent->driver_data);
+ if (IS_ERR_OR_NULL(descp))
+ return -ENODEV;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Found %s, revision: %u",
+ to_loongson_gfx(descp)->model, pdev->revision);
+
+ ldev = lsdc_create_device(pdev, descp, &lsdc_drm_driver);
+ if (IS_ERR(ldev))
+ return PTR_ERR(ldev);
+
+ ddev = &ldev->base;
+
+ pci_set_drvdata(pdev, ddev);
+
+ vga_client_register(pdev, lsdc_vga_set_decode);
+
+ drm_kms_helper_poll_init(ddev);
+
+ if (loongson_vblank) {
+ ret = drm_vblank_init(ddev, descp->num_of_crtc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq,
+ descp->funcs->irq_handler,
+ IRQF_SHARED,
+ dev_name(&pdev->dev), ddev);
+ if (ret) {
+ drm_err(ddev, "Failed to register interrupt: %d\n", ret);
+ return ret;
+ }
+
+ drm_info(ddev, "registered irq: %u\n", pdev->irq);
+ }
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ return ret;
+
+ drm_fbdev_generic_setup(ddev, 32);
+
+ return 0;
+}
+
+static void lsdc_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(ddev);
+ drm_atomic_helper_shutdown(ddev);
+}
+
+static int lsdc_drm_freeze(struct drm_device *ddev)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct lsdc_bo *lbo;
+ int ret;
+
+ /* Unpin all buffers pinned in VRAM */
+ mutex_lock(&ldev->gem.mutex);
+ list_for_each_entry(lbo, &ldev->gem.objects, list) {
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+ struct ttm_resource *resource = tbo->resource;
+ unsigned int pin_count = tbo->pin_count;
+
+ drm_dbg(ddev, "bo[%p], size: %zuKiB, type: %s, pin count: %u\n",
+ lbo, lsdc_bo_size(lbo) >> 10,
+ lsdc_mem_type_to_str(resource->mem_type), pin_count);
+
+ if (!pin_count)
+ continue;
+
+ if (resource->mem_type == TTM_PL_VRAM) {
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret)) {
+ drm_err(ddev, "bo reserve failed: %d\n", ret);
+ continue;
+ }
+
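+ /* Drop every outstanding pin so the BO can be evicted from VRAM */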
+ do {
+ lsdc_bo_unpin(lbo);
+ --pin_count;
+ } while (pin_count);
+
+ lsdc_bo_unreserve(lbo);
+ }
+ }
+ mutex_unlock(&ldev->gem.mutex);
+
+ lsdc_bo_evict_vram(ddev);
+
+ ret = drm_mode_config_helper_suspend(ddev);
+ if (unlikely(ret)) {
+ drm_err(ddev, "Freeze error: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lsdc_drm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return drm_mode_config_helper_resume(ddev);
+}
+
+static int lsdc_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return lsdc_drm_freeze(ddev);
+}
+
+static int lsdc_pm_thaw(struct device *dev)
+{
+ return lsdc_drm_resume(dev);
+}
+
+static int lsdc_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int error;
+
+ error = lsdc_pm_freeze(dev);
+ if (error)
+ return error;
+
+ pci_save_state(pdev);
+ /* Shut down the device */
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int lsdc_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+
+ pci_restore_state(pdev);
+
+ if (pcim_enable_device(pdev))
+ return -EIO;
+
+ return lsdc_pm_thaw(dev);
+}
+
+static const struct dev_pm_ops lsdc_pm_ops = {
+ .suspend = lsdc_pm_suspend,
+ .resume = lsdc_pm_resume,
+ .freeze = lsdc_pm_freeze,
+ .thaw = lsdc_pm_thaw,
+ .poweroff = lsdc_pm_freeze,
+ .restore = lsdc_pm_resume,
+};
+
+static const struct pci_device_id lsdc_pciid_list[] = {
+ {PCI_VDEVICE(LOONGSON, 0x7a06), CHIP_LS7A1000},
+ {PCI_VDEVICE(LOONGSON, 0x7a36), CHIP_LS7A2000},
+ { }
+};
+
+struct pci_driver lsdc_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = lsdc_pciid_list,
+ .probe = lsdc_pci_probe,
+ .remove = lsdc_pci_remove,
+ .driver.pm = &lsdc_pm_ops,
+};
+
+MODULE_DEVICE_TABLE(pci, lsdc_pciid_list);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.h b/drivers/gpu/drm/loongson/lsdc_drv.h
new file mode 100644
index 000000000000..fbf2d760ef27
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_drv.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_DRV_H__
+#define __LSDC_DRV_H__
+
+#include <linux/pci.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_plane.h>
+#include <drm/ttm/ttm_device.h>
+
+#include "lsdc_i2c.h"
+#include "lsdc_irq.h"
+#include "lsdc_gfxpll.h"
+#include "lsdc_output.h"
+#include "lsdc_pixpll.h"
+#include "lsdc_regs.h"
+
+/* Currently, all Loongson display controllers have two display pipes. */
+#define LSDC_NUM_CRTC 2
+
+/*
+ * The LS7A1000/LS7A2000 chipsets function as the south & north bridges of
+ * the Loongson 3 series processors and are typically equipped with on-board
+ * video RAM. The Loongson LS2K series are low-cost SoCs which share system
+ * RAM as video RAM; they don't have dedicated VRAM.
+ *
+ * There is only a 1:1 mapping of crtcs, encoders and connectors for the DC:
+ *
+ * display pipe 0 = crtc0 + dvo0 + encoder0 + connector0 + cursor0 + primary0
+ * display pipe 1 = crtc1 + dvo1 + encoder1 + connector1 + cursor1 + primary1
+ */
+
+enum loongson_chip_id {
+ CHIP_LS7A1000 = 0,
+ CHIP_LS7A2000 = 1,
+ CHIP_LS_LAST,
+};
+
+const struct lsdc_desc *
+lsdc_device_probe(struct pci_dev *pdev, enum loongson_chip_id chip);
+
+struct lsdc_kms_funcs;
+
+/* DC specific */
+
+struct lsdc_desc {
+ u32 num_of_crtc;
+ u32 max_pixel_clk;
+ u32 max_width;
+ u32 max_height;
+ u32 num_of_hw_cursor;
+ u32 hw_cursor_w;
+ u32 hw_cursor_h;
+ u32 pitch_align; /* CRTC DMA alignment constraint */
+ bool has_vblank_counter; /* 32 bit hw vsync counter */
+
+ /* device dependent ops, dc side */
+ const struct lsdc_kms_funcs *funcs;
+};
+
+/* GFX related resources wrangler */
+
+struct loongson_gfx_desc {
+ struct lsdc_desc dc;
+
+ u32 conf_reg_base;
+
+ /* GFXPLL shared by the DC, GMC and GPU */
+ struct {
+ u32 reg_offset;
+ u32 reg_size;
+ } gfxpll;
+
+ /* Pixel PLL, per display pipe */
+ struct {
+ u32 reg_offset;
+ u32 reg_size;
+ } pixpll[LSDC_NUM_CRTC];
+
+ enum loongson_chip_id chip_id;
+ char model[64];
+};
+
+static inline const struct loongson_gfx_desc *
+to_loongson_gfx(const struct lsdc_desc *dcp)
+{
+ return container_of_const(dcp, struct loongson_gfx_desc, dc);
+};
+
+struct lsdc_reg32 {
+ char *name;
+ u32 offset;
+};
+
+/* crtc hardware related ops */
+
+struct lsdc_crtc;
+
+struct lsdc_crtc_hw_ops {
+ void (*enable)(struct lsdc_crtc *lcrtc);
+ void (*disable)(struct lsdc_crtc *lcrtc);
+ void (*enable_vblank)(struct lsdc_crtc *lcrtc);
+ void (*disable_vblank)(struct lsdc_crtc *lcrtc);
+ void (*flip)(struct lsdc_crtc *lcrtc);
+ void (*clone)(struct lsdc_crtc *lcrtc);
+ void (*get_scan_pos)(struct lsdc_crtc *lcrtc, int *hpos, int *vpos);
+ void (*set_mode)(struct lsdc_crtc *lcrtc, const struct drm_display_mode *mode);
+ void (*soft_reset)(struct lsdc_crtc *lcrtc);
+ void (*reset)(struct lsdc_crtc *lcrtc);
+
+ u32 (*get_vblank_counter)(struct lsdc_crtc *lcrtc);
+ void (*set_dma_step)(struct lsdc_crtc *lcrtc, enum lsdc_dma_steps step);
+};
+
+struct lsdc_crtc {
+ struct drm_crtc base;
+ struct lsdc_pixpll pixpll;
+ struct lsdc_device *ldev;
+ const struct lsdc_crtc_hw_ops *hw_ops;
+ const struct lsdc_reg32 *preg;
+ unsigned int nreg;
+ struct drm_info_list *p_info_list;
+ unsigned int n_info_list;
+ bool has_vblank;
+};
+
+/* primary plane hardware related ops */
+
+struct lsdc_primary;
+
+struct lsdc_primary_plane_ops {
+ void (*update_fb_addr)(struct lsdc_primary *plane, u64 addr);
+ void (*update_fb_stride)(struct lsdc_primary *plane, u32 stride);
+ void (*update_fb_format)(struct lsdc_primary *plane,
+ const struct drm_format_info *format);
+};
+
+struct lsdc_primary {
+ struct drm_plane base;
+ const struct lsdc_primary_plane_ops *ops;
+ struct lsdc_device *ldev;
+};
+
+/* cursor plane hardware related ops */
+
+struct lsdc_cursor;
+
+struct lsdc_cursor_plane_ops {
+ void (*update_bo_addr)(struct lsdc_cursor *plane, u64 addr);
+ void (*update_cfg)(struct lsdc_cursor *plane,
+ enum lsdc_cursor_size cursor_size,
+			   enum lsdc_cursor_format fmt);
+ void (*update_position)(struct lsdc_cursor *plane, int x, int y);
+};
+
+struct lsdc_cursor {
+ struct drm_plane base;
+ const struct lsdc_cursor_plane_ops *ops;
+ struct lsdc_device *ldev;
+};
+
+struct lsdc_output {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static inline struct lsdc_output *
+connector_to_lsdc_output(struct drm_connector *connector)
+{
+ return container_of(connector, struct lsdc_output, connector);
+}
+
+static inline struct lsdc_output *
+encoder_to_lsdc_output(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct lsdc_output, encoder);
+}
+
+struct lsdc_display_pipe {
+ struct lsdc_crtc crtc;
+ struct lsdc_primary primary;
+ struct lsdc_cursor cursor;
+ struct lsdc_output output;
+ struct lsdc_i2c *li2c;
+ unsigned int index;
+};
+
+static inline struct lsdc_display_pipe *
+output_to_display_pipe(struct lsdc_output *output)
+{
+ return container_of(output, struct lsdc_display_pipe, output);
+}
+
+struct lsdc_kms_funcs {
+ irqreturn_t (*irq_handler)(int irq, void *arg);
+
+ int (*create_i2c)(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ unsigned int index);
+
+ int (*output_init)(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ struct i2c_adapter *ddc,
+ unsigned int index);
+
+ int (*cursor_plane_init)(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index);
+
+ int (*primary_plane_init)(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index);
+
+ int (*crtc_init)(struct drm_device *ddev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ unsigned int index,
+ bool has_vblank);
+};
+
+static inline struct lsdc_crtc *
+to_lsdc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct lsdc_crtc, base);
+}
+
+static inline struct lsdc_display_pipe *
+crtc_to_display_pipe(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct lsdc_display_pipe, crtc.base);
+}
+
+static inline struct lsdc_primary *
+to_lsdc_primary(struct drm_plane *plane)
+{
+ return container_of(plane, struct lsdc_primary, base);
+}
+
+static inline struct lsdc_cursor *
+to_lsdc_cursor(struct drm_plane *plane)
+{
+ return container_of(plane, struct lsdc_cursor, base);
+}
+
+struct lsdc_crtc_state {
+ struct drm_crtc_state base;
+ struct lsdc_pixpll_parms pparms;
+};
+
+struct lsdc_gem {
+ /* @mutex: protect objects list */
+ struct mutex mutex;
+ struct list_head objects;
+};
+
+struct lsdc_device {
+ struct drm_device base;
+ struct ttm_device bdev;
+
+ /* @descp: features description of the DC variant */
+ const struct lsdc_desc *descp;
+ struct pci_dev *dc;
+ struct pci_dev *gpu;
+
+ struct loongson_gfxpll *gfxpll;
+
+	/* @reglock: protects concurrent register access */
+ spinlock_t reglock;
+
+ void __iomem *reg_base;
+ resource_size_t vram_base;
+ resource_size_t vram_size;
+
+ resource_size_t gtt_base;
+ resource_size_t gtt_size;
+
+ struct lsdc_display_pipe dispipe[LSDC_NUM_CRTC];
+
+ struct lsdc_gem gem;
+
+ u32 irq_status;
+
+ /* tracking pinned memory */
+ size_t vram_pinned_size;
+ size_t gtt_pinned_size;
+
+	/* @num_output: the number of active display pipes */
+ unsigned int num_output;
+};
+
+static inline struct lsdc_device *tdev_to_ldev(struct ttm_device *bdev)
+{
+ return container_of(bdev, struct lsdc_device, bdev);
+}
+
+static inline struct lsdc_device *to_lsdc(struct drm_device *ddev)
+{
+ return container_of(ddev, struct lsdc_device, base);
+}
+
+static inline struct lsdc_crtc_state *
+to_lsdc_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct lsdc_crtc_state, base);
+}
+
+void lsdc_debugfs_init(struct drm_minor *minor);
+
+int ls7a1000_crtc_init(struct drm_device *ddev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ unsigned int index,
+ bool no_vblank);
+
+int ls7a2000_crtc_init(struct drm_device *ddev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ unsigned int index,
+ bool no_vblank);
+
+int lsdc_primary_plane_init(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index);
+
+int ls7a1000_cursor_plane_init(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index);
+
+int ls7a2000_cursor_plane_init(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index);
+
+/* Registers access helpers */
+
+static inline u32 lsdc_rreg32(struct lsdc_device *ldev, u32 offset)
+{
+ return readl(ldev->reg_base + offset);
+}
+
+static inline void lsdc_wreg32(struct lsdc_device *ldev, u32 offset, u32 val)
+{
+ writel(val, ldev->reg_base + offset);
+}
+
+static inline void lsdc_ureg32_set(struct lsdc_device *ldev,
+ u32 offset,
+ u32 mask)
+{
+ void __iomem *addr = ldev->reg_base + offset;
+ u32 val = readl(addr);
+
+ writel(val | mask, addr);
+}
+
+static inline void lsdc_ureg32_clr(struct lsdc_device *ldev,
+ u32 offset,
+ u32 mask)
+{
+ void __iomem *addr = ldev->reg_base + offset;
+ u32 val = readl(addr);
+
+ writel(val & ~mask, addr);
+}
+
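+/*
+ * Per display pipe register instances are mirrored at a fixed stride of
+ * CRTC_PIPE_OFFSET bytes; the following helpers take the pipe index into
+ * account when forming the register address.
+ */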
+static inline u32 lsdc_pipe_rreg32(struct lsdc_device *ldev,
+ u32 offset, u32 pipe)
+{
+ return readl(ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET);
+}
+
+static inline void lsdc_pipe_wreg32(struct lsdc_device *ldev,
+ u32 offset, u32 pipe, u32 val)
+{
+ writel(val, ldev->reg_base + offset + pipe * CRTC_PIPE_OFFSET);
+}
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
new file mode 100644
index 000000000000..04293df2f0de
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/dma-buf.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_gem.h"
+#include "lsdc_ttm.h"
+
+static int lsdc_gem_prime_pin(struct drm_gem_object *obj)
+{
+ struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
+ int ret;
+
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret))
+ return ret;
+
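+	/* Pin the BO into the GTT domain so it stays in system memory while shared */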
+ ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ lbo->sharing_count++;
+
+ lsdc_bo_unreserve(lbo);
+
+ return ret;
+}
+
+static void lsdc_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ struct lsdc_bo *lbo = gem_to_lsdc_bo(obj);
+ int ret;
+
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret))
+ return;
+
+ lsdc_bo_unpin(lbo);
+ if (lbo->sharing_count)
+ lbo->sharing_count--;
+
+ lsdc_bo_unreserve(lbo);
+}
+
+static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+ struct ttm_tt *tt = tbo->ttm;
+
+ if (!tt) {
+ drm_err(obj->dev, "sharing a buffer without backing memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return drm_prime_pages_to_sg(obj->dev, tt->pages, tt->num_pages);
+}
+
+static void lsdc_gem_object_free(struct drm_gem_object *obj)
+{
+ struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+
+ if (tbo)
+ ttm_bo_put(tbo);
+}
+
+static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+ struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+ int ret;
+
+ if (lbo->vmap_count > 0) {
+ ++lbo->vmap_count;
+ goto out;
+ }
+
+ ret = lsdc_bo_pin(lbo, 0, NULL);
+ if (unlikely(ret)) {
+ drm_err(obj->dev, "pin %p for vmap failed\n", lbo);
+ return ret;
+ }
+
+ ret = ttm_bo_vmap(tbo, &lbo->map);
+ if (ret) {
+ drm_err(obj->dev, "ttm bo vmap failed\n");
+ lsdc_bo_unpin(lbo);
+ return ret;
+ }
+
+ lbo->vmap_count = 1;
+
+out:
+ *map = lbo->map;
+
+ return 0;
+}
+
+static void lsdc_gem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+ struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+
+ if (unlikely(!lbo->vmap_count)) {
+ drm_warn(obj->dev, "%p is not mapped\n", lbo);
+ return;
+ }
+
+ --lbo->vmap_count;
+ if (lbo->vmap_count == 0) {
+ ttm_bo_vunmap(tbo, &lbo->map);
+
+ lsdc_bo_unpin(lbo);
+ }
+}
+
+static int lsdc_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *tbo = to_ttm_bo(obj);
+ int ret;
+
+ ret = ttm_bo_mmap_obj(vma, tbo);
+ if (unlikely(ret)) {
+ drm_warn(obj->dev, "mmap %p failed\n", tbo);
+ return ret;
+ }
+
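+	/*
+	 * TTM has its own refcounting for the mapping, so drop the GEM
+	 * reference here to avoid double accounting.
+	 */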
+ drm_gem_object_put(obj);
+
+ return 0;
+}
+
+static const struct drm_gem_object_funcs lsdc_gem_object_funcs = {
+ .free = lsdc_gem_object_free,
+ .export = drm_gem_prime_export,
+ .pin = lsdc_gem_prime_pin,
+ .unpin = lsdc_gem_prime_unpin,
+ .get_sg_table = lsdc_gem_prime_get_sg_table,
+ .vmap = lsdc_gem_object_vmap,
+ .vunmap = lsdc_gem_object_vunmap,
+ .mmap = lsdc_gem_object_mmap,
+};
+
+struct drm_gem_object *lsdc_gem_object_create(struct drm_device *ddev,
+ u32 domain,
+ size_t size,
+					      bool kernel,
+ struct sg_table *sg,
+ struct dma_resv *resv)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct drm_gem_object *gobj;
+ struct lsdc_bo *lbo;
+ int ret;
+
+	lbo = lsdc_bo_create(ddev, domain, size, kernel, sg, resv);
+ if (IS_ERR(lbo)) {
+ ret = PTR_ERR(lbo);
+ return ERR_PTR(ret);
+ }
+
+ if (!sg) {
+ /* VRAM is filled with random data */
+ lsdc_bo_clear(lbo);
+ }
+
+ gobj = &lbo->tbo.base;
+ gobj->funcs = &lsdc_gem_object_funcs;
+
+ /* tracking the BOs we created */
+ mutex_lock(&ldev->gem.mutex);
+ list_add_tail(&lbo->list, &ldev->gem.objects);
+ mutex_unlock(&ldev->gem.mutex);
+
+ return gobj;
+}
+
+struct drm_gem_object *
+lsdc_prime_import_sg_table(struct drm_device *ddev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct dma_resv *resv = attach->dmabuf->resv;
+ u64 size = attach->dmabuf->size;
+ struct drm_gem_object *gobj;
+ struct lsdc_bo *lbo;
+
+ dma_resv_lock(resv, NULL);
+ gobj = lsdc_gem_object_create(ddev, LSDC_GEM_DOMAIN_GTT, size, false,
+ sg, resv);
+ dma_resv_unlock(resv);
+
+ if (IS_ERR(gobj)) {
+ drm_err(ddev, "Failed to import sg table\n");
+ return gobj;
+ }
+
+ lbo = gem_to_lsdc_bo(gobj);
+ lbo->sharing_count = 1;
+
+ return gobj;
+}
+
+int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
+ struct drm_mode_create_dumb *args)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ const struct lsdc_desc *descp = ldev->descp;
+ u32 domain = LSDC_GEM_DOMAIN_VRAM;
+ struct drm_gem_object *gobj;
+ size_t size;
+ u32 pitch;
+ u32 handle;
+ int ret;
+
+ if (!args->width || !args->height)
+ return -EINVAL;
+
+ if (args->bpp != 32 && args->bpp != 16)
+ return -EINVAL;
+
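+	/*
+	 * For example, a 1920x1080 dumb buffer at 32 bpp has a pitch of
+	 * 1920 * 4 = 7680 bytes before alignment, and its size is
+	 * pitch * height rounded up to whole pages.
+	 */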
+ pitch = args->width * args->bpp / 8;
+ pitch = ALIGN(pitch, descp->pitch_align);
+ size = pitch * args->height;
+ size = ALIGN(size, PAGE_SIZE);
+
+	/* The maximum size allowed for a single BO is half the available VRAM */
+ if (size > ldev->vram_size / 2) {
+ drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20);
+ return -ENOMEM;
+ }
+
+ gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
+ if (IS_ERR(gobj)) {
+ drm_err(ddev, "Failed to create gem object\n");
+ return PTR_ERR(gobj);
+ }
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+
+	/* drop the reference from allocation, the handle holds it now */
+ drm_gem_object_put(gobj);
+ if (ret)
+ return ret;
+
+ args->pitch = pitch;
+ args->size = size;
+ args->handle = handle;
+
+ return 0;
+}
+
+int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
+ u32 handle, uint64_t *offset)
+{
+ struct drm_gem_object *gobj;
+
+ gobj = drm_gem_object_lookup(filp, handle);
+ if (!gobj)
+ return -ENOENT;
+
+ *offset = drm_vma_node_offset_addr(&gobj->vma_node);
+
+ drm_gem_object_put(gobj);
+
+ return 0;
+}
+
+void lsdc_gem_init(struct drm_device *ddev)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+
+ mutex_init(&ldev->gem.mutex);
+ INIT_LIST_HEAD(&ldev->gem.objects);
+}
+
+int lsdc_show_buffer_object(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct lsdc_bo *lbo;
+ unsigned int i;
+
+ mutex_lock(&ldev->gem.mutex);
+
+ i = 0;
+
+ list_for_each_entry(lbo, &ldev->gem.objects, list) {
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+ struct ttm_resource *resource = tbo->resource;
+
+ seq_printf(m, "bo[%04u][%p]: size: %8zuKiB %s offset: %8llx\n",
+ i, lbo, lsdc_bo_size(lbo) >> 10,
+ lsdc_mem_type_to_str(resource->mem_type),
+ lsdc_bo_gpu_offset(lbo));
+ i++;
+ }
+
+ mutex_unlock(&ldev->gem.mutex);
+
+ seq_printf(m, "Pinned BO size: VRAM: %zuKiB, GTT: %zu KiB\n",
+ ldev->vram_pinned_size >> 10, ldev->gtt_pinned_size >> 10);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.h b/drivers/gpu/drm/loongson/lsdc_gem.h
new file mode 100644
index 000000000000..92cbb10e6e13
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_gem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_GEM_H__
+#define __LSDC_GEM_H__
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+
+struct drm_gem_object *
+lsdc_prime_import_sg_table(struct drm_device *ddev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+
+int lsdc_dumb_map_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle,
+ uint64_t *offset);
+
+int lsdc_dumb_create(struct drm_file *file,
+ struct drm_device *ddev,
+ struct drm_mode_create_dumb *args);
+
+void lsdc_gem_init(struct drm_device *ddev);
+int lsdc_show_buffer_object(struct seq_file *m, void *arg);
+
+struct drm_gem_object *
+lsdc_gem_object_create(struct drm_device *ddev,
+ u32 domain,
+ size_t size,
+		       bool kernel,
+ struct sg_table *sg,
+ struct dma_resv *resv);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_gfxpll.c b/drivers/gpu/drm/loongson/lsdc_gfxpll.c
new file mode 100644
index 000000000000..249c09d703ad
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_gfxpll.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "lsdc_drv.h"
+
+/*
+ * The GFX PLL is the PLL shared by the DC, GMC and GPU; its register
+ * layout may change across chip variants.
+ *
+ * +-------------+ sel_out_dc
+ * +----| / div_out_0 | _____/ _____ DC
+ * | +-------------+
+ * refclk +---------+ +-------+ | +-------------+ sel_out_gmc
+ * ---+---> | div_ref | ---> | loopc | --+--> | / div_out_1 | _____/ _____ GMC
+ * | +---------+ +-------+ | +-------------+
+ * | / * | +-------------+ sel_out_gpu
+ * | +----| / div_out_2 | _____/ _____ GPU
+ * | +-------------+
+ * | ^
+ * | |
+ * +--------------------------- bypass ----------------------+
+ */
+
+struct loongson_gfxpll_bitmap {
+ /* Byte 0 ~ Byte 3 */
+ unsigned div_out_dc : 7; /* 6 : 0 DC output clock divider */
+ unsigned div_out_gmc : 7; /* 13 : 7 GMC output clock divider */
+ unsigned div_out_gpu : 7; /* 20 : 14 GPU output clock divider */
+ unsigned loopc : 9; /* 29 : 21 clock multiplier */
+ unsigned _reserved_1_ : 2; /* 31 : 30 */
+
+ /* Byte 4 ~ Byte 7 */
+ unsigned div_ref : 7; /* 38 : 32 Input clock divider */
+ unsigned locked : 1; /* 39 PLL locked indicator */
+ unsigned sel_out_dc : 1; /* 40 dc output clk enable */
+ unsigned sel_out_gmc : 1; /* 41 gmc output clk enable */
+ unsigned sel_out_gpu : 1; /* 42 gpu output clk enable */
+ unsigned set_param : 1; /* 43 Trigger the update */
+ unsigned bypass : 1; /* 44 */
+ unsigned powerdown : 1; /* 45 */
+ unsigned _reserved_2_ : 18; /* 46 : 63 no use */
+};
+
+union loongson_gfxpll_reg_bitmap {
+ struct loongson_gfxpll_bitmap bitmap;
+ u32 w[2];
+ u64 d;
+};
+
+static void __gfxpll_rreg(struct loongson_gfxpll *this,
+ union loongson_gfxpll_reg_bitmap *reg)
+{
+#if defined(CONFIG_64BIT)
+ reg->d = readq(this->mmio);
+#else
+ reg->w[0] = readl(this->mmio);
+ reg->w[1] = readl(this->mmio + 4);
+#endif
+}
+
+/* Update new parameters to the hardware */
+
+static int loongson_gfxpll_update(struct loongson_gfxpll * const this,
+ struct loongson_gfxpll_parms const *pin)
+{
+ /* None, TODO */
+
+ return 0;
+}
+
+static void loongson_gfxpll_get_rates(struct loongson_gfxpll * const this,
+ unsigned int *dc,
+ unsigned int *gmc,
+ unsigned int *gpu)
+{
+ struct loongson_gfxpll_parms *pparms = &this->parms;
+ union loongson_gfxpll_reg_bitmap gfxpll_reg;
+ unsigned int pre_output;
+ unsigned int dc_mhz;
+ unsigned int gmc_mhz;
+ unsigned int gpu_mhz;
+
+ __gfxpll_rreg(this, &gfxpll_reg);
+
+ pparms->div_ref = gfxpll_reg.bitmap.div_ref;
+ pparms->loopc = gfxpll_reg.bitmap.loopc;
+
+ pparms->div_out_dc = gfxpll_reg.bitmap.div_out_dc;
+ pparms->div_out_gmc = gfxpll_reg.bitmap.div_out_gmc;
+ pparms->div_out_gpu = gfxpll_reg.bitmap.div_out_gpu;
+
+ pre_output = pparms->ref_clock / pparms->div_ref * pparms->loopc;
+
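+	/* pre_output is in kHz, so divide by 1000 to report MHz */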
+ dc_mhz = pre_output / pparms->div_out_dc / 1000;
+ gmc_mhz = pre_output / pparms->div_out_gmc / 1000;
+ gpu_mhz = pre_output / pparms->div_out_gpu / 1000;
+
+ if (dc)
+ *dc = dc_mhz;
+
+ if (gmc)
+ *gmc = gmc_mhz;
+
+ if (gpu)
+ *gpu = gpu_mhz;
+}
+
+static void loongson_gfxpll_print(struct loongson_gfxpll * const this,
+ struct drm_printer *p,
+ bool verbose)
+{
+ struct loongson_gfxpll_parms *parms = &this->parms;
+ unsigned int dc, gmc, gpu;
+
+ if (verbose) {
+ drm_printf(p, "reference clock: %u\n", parms->ref_clock);
+ drm_printf(p, "div_ref = %u\n", parms->div_ref);
+ drm_printf(p, "loopc = %u\n", parms->loopc);
+
+ drm_printf(p, "div_out_dc = %u\n", parms->div_out_dc);
+ drm_printf(p, "div_out_gmc = %u\n", parms->div_out_gmc);
+ drm_printf(p, "div_out_gpu = %u\n", parms->div_out_gpu);
+ }
+
+ this->funcs->get_rates(this, &dc, &gmc, &gpu);
+
+ drm_printf(p, "dc: %uMHz, gmc: %uMHz, gpu: %uMHz\n", dc, gmc, gpu);
+}
+
+/* GFX (DC, GPU, GMC) PLL initialization and destroy function */
+
+static void loongson_gfxpll_fini(struct drm_device *ddev, void *data)
+{
+ struct loongson_gfxpll *this = (struct loongson_gfxpll *)data;
+
+ iounmap(this->mmio);
+
+ kfree(this);
+}
+
+static int loongson_gfxpll_init(struct loongson_gfxpll * const this)
+{
+ struct loongson_gfxpll_parms *pparms = &this->parms;
+ struct drm_printer printer = drm_info_printer(this->ddev->dev);
+
+ pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
+
+ this->mmio = ioremap(this->reg_base, this->reg_size);
+ if (IS_ERR_OR_NULL(this->mmio))
+ return -ENOMEM;
+
+ this->funcs->print(this, &printer, false);
+
+ return 0;
+}
+
+static const struct loongson_gfxpll_funcs lsdc_gmc_gpu_funcs = {
+ .init = loongson_gfxpll_init,
+ .update = loongson_gfxpll_update,
+ .get_rates = loongson_gfxpll_get_rates,
+ .print = loongson_gfxpll_print,
+};
+
+int loongson_gfxpll_create(struct drm_device *ddev,
+ struct loongson_gfxpll **ppout)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ const struct loongson_gfx_desc *gfx = to_loongson_gfx(ldev->descp);
+ struct loongson_gfxpll *this;
+ int ret;
+
+ this = kzalloc(sizeof(*this), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(this))
+ return -ENOMEM;
+
+ this->ddev = ddev;
+ this->reg_size = gfx->gfxpll.reg_size;
+ this->reg_base = gfx->conf_reg_base + gfx->gfxpll.reg_offset;
+ this->funcs = &lsdc_gmc_gpu_funcs;
+
+ ret = this->funcs->init(this);
+ if (unlikely(ret)) {
+ kfree(this);
+ return ret;
+ }
+
+ *ppout = this;
+
+ return drmm_add_action_or_reset(ddev, loongson_gfxpll_fini, this);
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_gfxpll.h b/drivers/gpu/drm/loongson/lsdc_gfxpll.h
new file mode 100644
index 000000000000..9d59cbfc145d
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_gfxpll.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_GFXPLL_H__
+#define __LSDC_GFXPLL_H__
+
+#include <drm/drm_device.h>
+
+struct loongson_gfxpll;
+
+struct loongson_gfxpll_parms {
+ unsigned int ref_clock;
+ unsigned int div_ref;
+ unsigned int loopc;
+ unsigned int div_out_dc;
+ unsigned int div_out_gmc;
+ unsigned int div_out_gpu;
+};
+
+struct loongson_gfxpll_funcs {
+ int (*init)(struct loongson_gfxpll * const this);
+
+ int (*update)(struct loongson_gfxpll * const this,
+ struct loongson_gfxpll_parms const *pin);
+
+ void (*get_rates)(struct loongson_gfxpll * const this,
+ unsigned int *dc, unsigned int *gmc, unsigned int *gpu);
+
+ void (*print)(struct loongson_gfxpll * const this,
+ struct drm_printer *printer, bool verbose);
+};
+
+struct loongson_gfxpll {
+ struct drm_device *ddev;
+ void __iomem *mmio;
+
+ /* PLL register offset */
+ u32 reg_base;
+ /* PLL register size in bytes */
+ u32 reg_size;
+
+ const struct loongson_gfxpll_funcs *funcs;
+
+ struct loongson_gfxpll_parms parms;
+};
+
+int loongson_gfxpll_create(struct drm_device *ddev,
+ struct loongson_gfxpll **ppout);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c
new file mode 100644
index 000000000000..9625d0b1d0b4
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_i2c.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_managed.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_output.h"
+
+/*
+ * __lsdc_gpio_i2c_set - set the state of a gpio pin indicated by mask
+ * @mask: gpio pin mask
+ * @state: "0" for low, "1" for high
+ */
+static void __lsdc_gpio_i2c_set(struct lsdc_i2c * const li2c, int mask, int state)
+{
+ struct lsdc_device *ldev = to_lsdc(li2c->ddev);
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&ldev->reglock, flags);
+
+ if (state) {
+ /*
+		 * Set this pin as an input directly (write 1 for input);
+		 * the external pull-up resistor will pull the line high.
+ */
+ val = readb(li2c->dir_reg);
+ val |= mask;
+ writeb(val, li2c->dir_reg);
+ } else {
+ /* First set this pin as output, write 0 for output */
+ val = readb(li2c->dir_reg);
+ val &= ~mask;
+ writeb(val, li2c->dir_reg);
+
+ /* Then, make this pin output 0 */
+ val = readb(li2c->dat_reg);
+ val &= ~mask;
+ writeb(val, li2c->dat_reg);
+ }
+
+ spin_unlock_irqrestore(&ldev->reglock, flags);
+}
+
+/*
+ * __lsdc_gpio_i2c_get - read value back from the gpio pin indicated by mask
+ * @mask: gpio pin mask
+ * return "0" for low, "1" for high
+ */
+static int __lsdc_gpio_i2c_get(struct lsdc_i2c * const li2c, int mask)
+{
+ struct lsdc_device *ldev = to_lsdc(li2c->ddev);
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&ldev->reglock, flags);
+
+ /* First set this pin as input */
+ val = readb(li2c->dir_reg);
+ val |= mask;
+ writeb(val, li2c->dir_reg);
+
+ /* Then get level state from this pin */
+ val = readb(li2c->dat_reg);
+
+ spin_unlock_irqrestore(&ldev->reglock, flags);
+
+ return (val & mask) ? 1 : 0;
+}
+
+static void lsdc_gpio_i2c_set_sda(void *i2c, int state)
+{
+ struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+ /* set state on the li2c->sda pin */
+ return __lsdc_gpio_i2c_set(li2c, li2c->sda, state);
+}
+
+static void lsdc_gpio_i2c_set_scl(void *i2c, int state)
+{
+ struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+ /* set state on the li2c->scl pin */
+ return __lsdc_gpio_i2c_set(li2c, li2c->scl, state);
+}
+
+static int lsdc_gpio_i2c_get_sda(void *i2c)
+{
+ struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+ /* read value from the li2c->sda pin */
+ return __lsdc_gpio_i2c_get(li2c, li2c->sda);
+}
+
+static int lsdc_gpio_i2c_get_scl(void *i2c)
+{
+ struct lsdc_i2c * const li2c = (struct lsdc_i2c *)i2c;
+ /* read the value from the li2c->scl pin */
+ return __lsdc_gpio_i2c_get(li2c, li2c->scl);
+}
+
+static void lsdc_destroy_i2c(struct drm_device *ddev, void *data)
+{
+ struct lsdc_i2c *li2c = (struct lsdc_i2c *)data;
+
+ if (li2c) {
+ i2c_del_adapter(&li2c->adapter);
+ kfree(li2c);
+ }
+}
+
+/*
+ * The DC in the LS7A1000/LS7A2000/LS2K2000 has built-in GPIO hardware,
+ * which is used here to bit-bang an i2c bus.
+ *
+ * @index: output channel index, 0 for PIPE0, 1 for PIPE1
+ */
+int lsdc_create_i2c_chan(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ unsigned int index)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct i2c_adapter *adapter;
+ struct lsdc_i2c *li2c;
+ int ret;
+
+	li2c = kzalloc(sizeof(*li2c), GFP_KERNEL);
+	if (!li2c)
+		return -ENOMEM;
+
+	if (index == 0) {
+		li2c->sda = 0x01; /* pin 0 */
+		li2c->scl = 0x02; /* pin 1 */
+	} else if (index == 1) {
+		li2c->sda = 0x04; /* pin 2 */
+		li2c->scl = 0x08; /* pin 3 */
+	} else {
+		kfree(li2c);
+		return -ENOENT;
+	}
+
+	dispipe->li2c = li2c;
+
+ li2c->ddev = ddev;
+ li2c->dir_reg = ldev->reg_base + LS7A_DC_GPIO_DIR_REG;
+ li2c->dat_reg = ldev->reg_base + LS7A_DC_GPIO_DAT_REG;
+
+ li2c->bit.setsda = lsdc_gpio_i2c_set_sda;
+ li2c->bit.setscl = lsdc_gpio_i2c_set_scl;
+ li2c->bit.getsda = lsdc_gpio_i2c_get_sda;
+ li2c->bit.getscl = lsdc_gpio_i2c_get_scl;
+ li2c->bit.udelay = 5;
+ li2c->bit.timeout = usecs_to_jiffies(2200);
+ li2c->bit.data = li2c;
+
+ adapter = &li2c->adapter;
+ adapter->algo_data = &li2c->bit;
+ adapter->owner = THIS_MODULE;
+ adapter->class = I2C_CLASS_DDC;
+ adapter->dev.parent = ddev->dev;
+ adapter->nr = -1;
+
+ snprintf(adapter->name, sizeof(adapter->name), "lsdc-i2c%u", index);
+
+ i2c_set_adapdata(adapter, li2c);
+
+ ret = i2c_bit_add_bus(adapter);
+ if (ret) {
+ kfree(li2c);
+ return ret;
+ }
+
+ ret = drmm_add_action_or_reset(ddev, lsdc_destroy_i2c, li2c);
+ if (ret)
+ return ret;
+
+ drm_info(ddev, "%s(sda pin mask=%u, scl pin mask=%u) created\n",
+ adapter->name, li2c->sda, li2c->scl);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.h b/drivers/gpu/drm/loongson/lsdc_i2c.h
new file mode 100644
index 000000000000..88cd1a1817a5
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_i2c.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_I2C_H__
+#define __LSDC_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct lsdc_i2c {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data bit;
+ struct drm_device *ddev;
+ void __iomem *dir_reg;
+ void __iomem *dat_reg;
+ /* pin bit mask */
+ u8 sda;
+ u8 scl;
+};
+
+struct lsdc_display_pipe;
+
+int lsdc_create_i2c_chan(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ unsigned int index);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_irq.c b/drivers/gpu/drm/loongson/lsdc_irq.c
new file mode 100644
index 000000000000..efdc4d10792d
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_irq.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_vblank.h>
+
+#include "lsdc_irq.h"
+
+/*
+ * For the DC in the LS7A2000, clearing the interrupt status is achieved
+ * by writing "1" to LSDC_INT_REG.
+ *
+ * For the DC in the LS7A1000, clearing the interrupt status is achieved
+ * by writing "0" to LSDC_INT_REG.
+ *
+ * Two different hardware engineers implemented it, each in their own way.
+ */
+
+irqreturn_t ls7a2000_dc_irq_handler(int irq, void *arg)
+{
+ struct drm_device *ddev = arg;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ u32 val;
+
+ /* Read the interrupt status */
+ val = lsdc_rreg32(ldev, LSDC_INT_REG);
+ if ((val & INT_STATUS_MASK) == 0) {
+ drm_warn(ddev, "no interrupt occurs\n");
+ return IRQ_NONE;
+ }
+
+ ldev->irq_status = val;
+
+ /* write "1" to clear the interrupt status */
+ lsdc_wreg32(ldev, LSDC_INT_REG, val);
+
+ if (ldev->irq_status & INT_CRTC0_VSYNC)
+ drm_handle_vblank(ddev, 0);
+
+ if (ldev->irq_status & INT_CRTC1_VSYNC)
+ drm_handle_vblank(ddev, 1);
+
+ return IRQ_HANDLED;
+}
+
+/* For the DC in LS7A1000 and LS2K1000 */
+irqreturn_t ls7a1000_dc_irq_handler(int irq, void *arg)
+{
+ struct drm_device *ddev = arg;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ u32 val;
+
+ /* Read the interrupt status */
+ val = lsdc_rreg32(ldev, LSDC_INT_REG);
+ if ((val & INT_STATUS_MASK) == 0) {
+ drm_warn(ddev, "no interrupt occurs\n");
+ return IRQ_NONE;
+ }
+
+ ldev->irq_status = val;
+
+ /* write "0" to clear the interrupt status */
+ val &= ~(INT_CRTC0_VSYNC | INT_CRTC1_VSYNC);
+ lsdc_wreg32(ldev, LSDC_INT_REG, val);
+
+ if (ldev->irq_status & INT_CRTC0_VSYNC)
+ drm_handle_vblank(ddev, 0);
+
+ if (ldev->irq_status & INT_CRTC1_VSYNC)
+ drm_handle_vblank(ddev, 1);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_irq.h b/drivers/gpu/drm/loongson/lsdc_irq.h
new file mode 100644
index 000000000000..726cb3018b89
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_irq.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_IRQ_H__
+#define __LSDC_IRQ_H__
+
+#include <linux/irqreturn.h>
+
+#include "lsdc_drv.h"
+
+irqreturn_t ls7a1000_dc_irq_handler(int irq, void *arg);
+irqreturn_t ls7a2000_dc_irq_handler(int irq, void *arg);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_output.h b/drivers/gpu/drm/loongson/lsdc_output.h
new file mode 100644
index 000000000000..097789051a1d
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_output.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_OUTPUT_H__
+#define __LSDC_OUTPUT_H__
+
+#include "lsdc_drv.h"
+
+int ls7a1000_output_init(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ struct i2c_adapter *ddc,
+ unsigned int index);
+
+int ls7a2000_output_init(struct drm_device *ldev,
+ struct lsdc_display_pipe *dispipe,
+ struct i2c_adapter *ddc,
+ unsigned int index);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a1000.c b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c
new file mode 100644
index 000000000000..6fc8dd1c7d9a
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_output.h"
+
+/*
+ * The display controller in the LS7A1000 exports two DVO interfaces, so an
+ * external encoder is required, unless the DVO is wired directly to a DPI
+ * panel.
+ *
+ * ___________________ _________
+ * | -------| | |
+ * | CRTC0 --> | DVO0 ----> Encoder0 ---> Connector0 ---> | Display |
+ * | _ _ -------| ^ ^ |_________|
+ * | | | | | +------+ | | |
+ * | |_| |_| | i2c6 | <--------+-------------+
+ * | +------+ |
+ * | |
+ * | DC in LS7A1000 |
+ * | |
+ * | _ _ +------+ |
+ * | | | | | | i2c7 | <--------+-------------+
+ * | |_| |_| +------+ | | | _________
+ * | -------| | | | |
+ * | CRTC1 --> | DVO1 ----> Encoder1 ---> Connector1 ---> | Panel |
+ * | -------| |_________|
+ * |___________________|
+ *
+ * Currently, we assume the external encoders connected to the DVO are
+ * transparent. Loongson's DVO interface can directly drive RGB888 panels.
+ *
+ * TODO: Add support for non-transparent encoders
+ */
+
+static int ls7a1000_dpi_connector_get_modes(struct drm_connector *conn)
+{
+ unsigned int num = 0;
+ struct edid *edid;
+
+ if (conn->ddc) {
+ edid = drm_get_edid(conn, conn->ddc);
+ if (edid) {
+ drm_connector_update_edid_property(conn, edid);
+ num = drm_add_edid_modes(conn, edid);
+ kfree(edid);
+ }
+
+ return num;
+ }
+
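+	/* No DDC available, fall back to a default list of display modes */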
+ num = drm_add_modes_noedid(conn, 1920, 1200);
+
+ drm_set_preferred_mode(conn, 1024, 768);
+
+ return num;
+}
+
+static struct drm_encoder *
+ls7a1000_dpi_connector_get_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_output *output = connector_to_lsdc_output(connector);
+
+ return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs
+ls7a1000_dpi_connector_helpers = {
+ .atomic_best_encoder = ls7a1000_dpi_connector_get_best_encoder,
+ .get_modes = ls7a1000_dpi_connector_get_modes,
+};
+
+static enum drm_connector_status
+ls7a1000_dpi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct i2c_adapter *ddc = connector->ddc;
+
+ if (ddc) {
+ if (drm_probe_ddc(ddc))
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+ }
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs ls7a1000_dpi_connector_funcs = {
+ .detect = ls7a1000_dpi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+};
+
+static void ls7a1000_pipe0_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+
+ /*
+	 * We need this for S3 support; the screen will not light up after
+	 * resume if this register is not set correctly.
+ */
+ lsdc_wreg32(ldev, LSDC_CRTC0_DVO_CONF_REG,
+ PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN);
+}
+
+static void ls7a1000_pipe1_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+
+ /*
+	 * We need this for S3 support; the screen will not light up after
+	 * resume if this register is not set correctly.
+ */
+
+ /* DVO */
+ lsdc_wreg32(ldev, LSDC_CRTC1_DVO_CONF_REG,
+ BIT(31) | PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN);
+}
+
+static const struct drm_encoder_funcs ls7a1000_encoder_funcs[2] = {
+ {
+ .reset = ls7a1000_pipe0_encoder_reset,
+ .destroy = drm_encoder_cleanup,
+ },
+ {
+ .reset = ls7a1000_pipe1_encoder_reset,
+ .destroy = drm_encoder_cleanup,
+ },
+};
+
+int ls7a1000_output_init(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ struct i2c_adapter *ddc,
+ unsigned int index)
+{
+ struct lsdc_output *output = &dispipe->output;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_connector *connector = &output->connector;
+ int ret;
+
+ ret = drm_encoder_init(ddev, encoder, &ls7a1000_encoder_funcs[index],
+ DRM_MODE_ENCODER_TMDS, "encoder-%u", index);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = BIT(index);
+
+ ret = drm_connector_init_with_ddc(ddev, connector,
+ &ls7a1000_dpi_connector_funcs,
+ DRM_MODE_CONNECTOR_DPI, ddc);
+ if (ret)
+ return ret;
+
+ drm_info(ddev, "display pipe-%u has a DVO\n", index);
+
+ drm_connector_helper_add(connector, &ls7a1000_dpi_connector_helpers);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a2000.c b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c
new file mode 100644
index 000000000000..ce3dabec887e
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_output.h"
+
+/*
+ * The display controller in the LS7A2000 has two display pipes.
+ * Display pipe 0 is attached to a built-in transparent VGA encoder and
+ * a built-in HDMI encoder.
+ * Display pipe 1 has only one built-in HDMI encoder connected.
+ * ______________________ _____________
+ * | +-----+ | | |
+ * | CRTC0 -+--> | VGA | ----> VGA Connector ---> | VGA Monitor |<---+
+ * | | +-----+ | |_____________| |
+ * | | | ______________ |
+ * | | +------+ | | | |
+ * | +--> | HDMI | ----> HDMI Connector --> | HDMI Monitor |<--+
+ * | +------+ | |______________| |
+ * | +------+ | |
+ * | | i2c6 | <-------------------------------------------+
+ * | +------+ |
+ * | |
+ * | DC in LS7A2000 |
+ * | |
+ * | +------+ |
+ * | | i2c7 | <--------------------------------+
+ * | +------+ | |
+ * | | ______|_______
+ * | +------+ | | |
+ * | CRTC1 ---> | HDMI | ----> HDMI Connector ---> | HDMI Monitor |
+ * | +------+ | |______________|
+ * |______________________|
+ */
+
+static int ls7a2000_connector_get_modes(struct drm_connector *connector)
+{
+ unsigned int num = 0;
+ struct edid *edid;
+
+ if (connector->ddc) {
+ edid = drm_get_edid(connector, connector->ddc);
+ if (edid) {
+ drm_connector_update_edid_property(connector, edid);
+ num = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return num;
+ }
+
+ num = drm_add_modes_noedid(connector, 1920, 1200);
+
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return num;
+}
+
+static struct drm_encoder *
+ls7a2000_connector_get_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_output *output = connector_to_lsdc_output(connector);
+
+ return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs ls7a2000_connector_helpers = {
+ .atomic_best_encoder = ls7a2000_connector_get_best_encoder,
+ .get_modes = ls7a2000_connector_get_modes,
+};
+
+/* debugfs */
+
+#define LSDC_HDMI_REG(i, reg) { \
+ .name = __stringify_1(LSDC_HDMI##i##_##reg##_REG), \
+ .offset = LSDC_HDMI##i##_##reg##_REG, \
+}
+
+static const struct lsdc_reg32 ls7a2000_hdmi0_encoder_regs[] = {
+ LSDC_HDMI_REG(0, ZONE),
+ LSDC_HDMI_REG(0, INTF_CTRL),
+ LSDC_HDMI_REG(0, PHY_CTRL),
+ LSDC_HDMI_REG(0, PHY_PLL),
+ LSDC_HDMI_REG(0, AVI_INFO_CRTL),
+ LSDC_HDMI_REG(0, PHY_CAL),
+ LSDC_HDMI_REG(0, AUDIO_PLL_LO),
+ LSDC_HDMI_REG(0, AUDIO_PLL_HI),
+ {NULL, 0}, /* MUST be {NULL, 0} terminated */
+};
+
+static const struct lsdc_reg32 ls7a2000_hdmi1_encoder_regs[] = {
+ LSDC_HDMI_REG(1, ZONE),
+ LSDC_HDMI_REG(1, INTF_CTRL),
+ LSDC_HDMI_REG(1, PHY_CTRL),
+ LSDC_HDMI_REG(1, PHY_PLL),
+ LSDC_HDMI_REG(1, AVI_INFO_CRTL),
+ LSDC_HDMI_REG(1, PHY_CAL),
+ LSDC_HDMI_REG(1, AUDIO_PLL_LO),
+ LSDC_HDMI_REG(1, AUDIO_PLL_HI),
+ {NULL, 0}, /* MUST be {NULL, 0} terminated */
+};
+
+static int ls7a2000_hdmi_encoder_regs_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ const struct lsdc_reg32 *preg;
+
+ preg = (const struct lsdc_reg32 *)node->info_ent->data;
+
+ while (preg->name) {
+ u32 offset = preg->offset;
+
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ preg->name, offset, lsdc_rreg32(ldev, offset));
+ ++preg;
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list ls7a2000_hdmi0_debugfs_files[] = {
+ { "regs", ls7a2000_hdmi_encoder_regs_show, 0, (void *)ls7a2000_hdmi0_encoder_regs },
+};
+
+static const struct drm_info_list ls7a2000_hdmi1_debugfs_files[] = {
+ { "regs", ls7a2000_hdmi_encoder_regs_show, 0, (void *)ls7a2000_hdmi1_encoder_regs },
+};
+
+static void ls7a2000_hdmi0_late_register(struct drm_connector *connector,
+ struct dentry *root)
+{
+ struct drm_device *ddev = connector->dev;
+ struct drm_minor *minor = ddev->primary;
+
+ drm_debugfs_create_files(ls7a2000_hdmi0_debugfs_files,
+ ARRAY_SIZE(ls7a2000_hdmi0_debugfs_files),
+ root, minor);
+}
+
+static void ls7a2000_hdmi1_late_register(struct drm_connector *connector,
+ struct dentry *root)
+{
+ struct drm_device *ddev = connector->dev;
+ struct drm_minor *minor = ddev->primary;
+
+ drm_debugfs_create_files(ls7a2000_hdmi1_debugfs_files,
+ ARRAY_SIZE(ls7a2000_hdmi1_debugfs_files),
+ root, minor);
+}
+
+/* monitor presence detection */
+
+static enum drm_connector_status
+ls7a2000_hdmi0_vga_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_device *ddev = connector->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG);
+
+ if (val & HDMI0_HPD_FLAG)
+ return connector_status_connected;
+
+ if (connector->ddc) {
+ if (drm_probe_ddc(connector->ddc))
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+ }
+
+ return connector_status_unknown;
+}
+
+static enum drm_connector_status
+ls7a2000_hdmi1_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct lsdc_device *ldev = to_lsdc(connector->dev);
+ u32 val;
+
+ val = lsdc_rreg32(ldev, LSDC_HDMI_HPD_STATUS_REG);
+
+ if (val & HDMI1_HPD_FLAG)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs ls7a2000_hdmi_connector_funcs[2] = {
+ {
+ .detect = ls7a2000_hdmi0_vga_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .debugfs_init = ls7a2000_hdmi0_late_register,
+ },
+ {
+ .detect = ls7a2000_hdmi1_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .debugfs_init = ls7a2000_hdmi1_late_register,
+ },
+};
+
+/*
+ * Even though some boards have only one HDMI port, on display pipe 1,
+ * we still need to hook lsdc_encoder_funcs up on display pipe 0, because
+ * its reset() callback must get called to set LSDC_HDMIx_CTRL_REG to use
+ * software GPIO-emulated i2c. Otherwise, the firmware may set
+ * LSDC_HDMIx_CTRL_REG blindly.
+ */
+static void ls7a2000_hdmi0_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ u32 val;
+
+ val = PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN;
+ lsdc_wreg32(ldev, LSDC_CRTC0_DVO_CONF_REG, val);
+
+ /* using software gpio emulated i2c */
+ val = lsdc_rreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG);
+ val &= ~HW_I2C_EN;
+ lsdc_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, val);
+
+ /* help the hdmi phy to get out of reset state */
+ lsdc_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, HDMI_PHY_RESET_N);
+
+ mdelay(20);
+
+ drm_dbg(ddev, "HDMI-0 Reset\n");
+}
+
+static void ls7a2000_hdmi1_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ u32 val;
+
+ val = PHY_CLOCK_POL | PHY_CLOCK_EN | PHY_DATA_EN;
+ lsdc_wreg32(ldev, LSDC_CRTC1_DVO_CONF_REG, val);
+
+ /* using software gpio emulated i2c */
+ val = lsdc_rreg32(ldev, LSDC_HDMI1_INTF_CTRL_REG);
+ val &= ~HW_I2C_EN;
+ lsdc_wreg32(ldev, LSDC_HDMI1_INTF_CTRL_REG, val);
+
+ /* help the hdmi phy to get out of reset state */
+ lsdc_wreg32(ldev, LSDC_HDMI1_PHY_CTRL_REG, HDMI_PHY_RESET_N);
+
+ mdelay(20);
+
+ drm_dbg(ddev, "HDMI-1 Reset\n");
+}
+
+static const struct drm_encoder_funcs ls7a2000_encoder_funcs[2] = {
+ {
+ .reset = ls7a2000_hdmi0_encoder_reset,
+ .destroy = drm_encoder_cleanup,
+ },
+ {
+ .reset = ls7a2000_hdmi1_encoder_reset,
+ .destroy = drm_encoder_cleanup,
+ },
+};
+
+static int ls7a2000_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+ struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+ unsigned int index = dispipe->index;
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct hdmi_avi_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ unsigned char *ptr = &buffer[HDMI_INFOFRAME_HEADER_SIZE];
+ unsigned int content0, content1, content2, content3;
+ int err;
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&infoframe,
+ &output->connector,
+ mode);
+ if (err < 0) {
+ drm_err(ddev, "failed to setup AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ /* Fixed infoframe configuration not linked to the mode */
+ infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
+
+ err = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (err < 0) {
+ drm_err(ddev, "failed to pack AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ content0 = *(unsigned int *)ptr;
+ content1 = *(ptr + 4);
+ content2 = *(unsigned int *)(ptr + 5);
+ content3 = *(unsigned int *)(ptr + 9);
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT0, index, content0);
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT1, index, content1);
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT2, index, content2);
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_CONTENT3, index, content3);
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_AVI_INFO_CRTL_REG, index,
+ AVI_PKT_ENABLE | AVI_PKT_UPDATE);
+
+ drm_dbg(ddev, "Update HDMI-%u avi infoframe\n", index);
+
+ return 0;
+}
+
+static void ls7a2000_hdmi_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+ struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+ unsigned int index = dispipe->index;
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ u32 val;
+
+ /* Disable the hdmi phy */
+ val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index);
+ val &= ~HDMI_PHY_EN;
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index, val);
+
+ /* Disable the hdmi interface */
+ val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index);
+ val &= ~HDMI_INTERFACE_EN;
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index, val);
+
+ drm_dbg(ddev, "HDMI-%u disabled\n", index);
+}
+
+static void ls7a2000_hdmi_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+ struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+ unsigned int index = dispipe->index;
+ u32 val;
+
+	/* The datasheet says it should be larger than 48 */
+ val = 64 << HDMI_H_ZONE_IDLE_SHIFT | 64 << HDMI_V_ZONE_IDLE_SHIFT;
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_ZONE_REG, index, val);
+
+ val = HDMI_PHY_TERM_STATUS |
+ HDMI_PHY_TERM_DET_EN |
+ HDMI_PHY_TERM_H_EN |
+ HDMI_PHY_TERM_L_EN |
+ HDMI_PHY_RESET_N |
+ HDMI_PHY_EN;
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CTRL_REG, index, val);
+
+ udelay(2);
+
+ val = HDMI_CTL_PERIOD_MODE |
+ HDMI_AUDIO_EN |
+ HDMI_PACKET_EN |
+ HDMI_INTERFACE_EN |
+ (8 << HDMI_VIDEO_PREAMBLE_SHIFT);
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_INTF_CTRL_REG, index, val);
+
+ drm_dbg(ddev, "HDMI-%u enabled\n", index);
+}
+
+/*
+ * Fout = M * Fin
+ *
+ * M = (4 * LF) / (IDF * ODF)
+ *
+ * IDF: Input Division Factor
+ * ODF: Output Division Factor
+ * LF: Loop Factor
+ * M: Required Mult
+ *
+ * +--------------------------------------------------------+
+ * | Fin (kHZ) | M | IDF | LF | ODF | Fout(Mhz) |
+ * |-------------------+----+-----+----+-----+--------------|
+ * | 170000 ~ 340000 | 10 | 16 | 40 | 1 | 1700 ~ 3400 |
+ * | 85000 ~ 170000 | 10 | 8 | 40 | 2 | 850 ~ 1700 |
+ * | 42500 ~ 85000 | 10 | 4 | 40 | 4 | 425 ~ 850 |
+ * | 21250 ~ 42500 | 10 | 2 | 40 | 8 | 212.5 ~ 425 |
+ * | 20000 ~ 21250 | 10 | 1 | 40 | 16 | 200 ~ 212.5 |
+ * +--------------------------------------------------------+
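+ *
+ * For example, a 148500 kHz (148.5 MHz) pixel clock falls into the
+ * 85000 ~ 170000 row: IDF = 8, LF = 40, ODF = 2, so
+ * M = (4 * 40) / (8 * 2) = 10 and Fout = 1485 MHz.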
+ */
+static void ls7a2000_hdmi_phy_pll_config(struct lsdc_device *ldev,
+ int fin,
+ unsigned int index)
+{
+ struct drm_device *ddev = &ldev->base;
+ int count = 0;
+ u32 val;
+
+ /* Firstly, disable phy pll */
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, 0x0);
+
+ /*
+	 * Most of the time, the Loongson HDMI PHY requires M = 10,
+	 * e.g. 10 = (4 * 40) / (8 * 2).
+	 * Note that the ODF field holds log2 of the actual output division
+	 * factor: writing "1" to it yields an ODF of 2.
+ */
+
+ if (fin >= 170000)
+ val = (16 << HDMI_PLL_IDF_SHIFT) |
+ (40 << HDMI_PLL_LF_SHIFT) |
+ (0 << HDMI_PLL_ODF_SHIFT);
+ else if (fin >= 85000)
+ val = (8 << HDMI_PLL_IDF_SHIFT) |
+ (40 << HDMI_PLL_LF_SHIFT) |
+ (1 << HDMI_PLL_ODF_SHIFT);
+ else if (fin >= 42500)
+ val = (4 << HDMI_PLL_IDF_SHIFT) |
+ (40 << HDMI_PLL_LF_SHIFT) |
+ (2 << HDMI_PLL_ODF_SHIFT);
+ else if (fin >= 21250)
+ val = (2 << HDMI_PLL_IDF_SHIFT) |
+ (40 << HDMI_PLL_LF_SHIFT) |
+ (3 << HDMI_PLL_ODF_SHIFT);
+ else
+ val = (1 << HDMI_PLL_IDF_SHIFT) |
+ (40 << HDMI_PLL_LF_SHIFT) |
+ (4 << HDMI_PLL_ODF_SHIFT);
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, val);
+
+ val |= HDMI_PLL_ENABLE;
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index, val);
+
+ udelay(2);
+
+ drm_dbg(ddev, "Fin of HDMI-%u: %d kHz\n", index, fin);
+
+ /* Wait hdmi phy pll lock */
+ do {
+ val = lsdc_pipe_rreg32(ldev, LSDC_HDMI0_PHY_PLL_REG, index);
+
+ if (val & HDMI_PLL_LOCKED) {
+ drm_dbg(ddev, "Setting HDMI-%u PLL take %d cycles\n",
+ index, count);
+ break;
+ }
+ ++count;
+ } while (count < 1000);
+
+ lsdc_pipe_wreg32(ldev, LSDC_HDMI0_PHY_CAL_REG, index, 0x0f000ff0);
+
+ if (count >= 1000)
+ drm_err(ddev, "Setting HDMI-%u PLL failed\n", index);
+}
+
+static void ls7a2000_hdmi_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct lsdc_output *output = encoder_to_lsdc_output(encoder);
+ struct lsdc_display_pipe *dispipe = output_to_display_pipe(output);
+ unsigned int index = dispipe->index;
+ struct drm_device *ddev = encoder->dev;
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct drm_display_mode *mode = &crtc_state->mode;
+
+ ls7a2000_hdmi_phy_pll_config(ldev, mode->clock, index);
+
+ ls7a2000_hdmi_set_avi_infoframe(encoder, mode);
+
+ drm_dbg(ddev, "%s modeset finished\n", encoder->name);
+}
+
+static const struct drm_encoder_helper_funcs ls7a2000_encoder_helper_funcs = {
+ .atomic_disable = ls7a2000_hdmi_atomic_disable,
+ .atomic_enable = ls7a2000_hdmi_atomic_enable,
+ .atomic_mode_set = ls7a2000_hdmi_atomic_mode_set,
+};
+
+/*
+ * For LS7A2000:
+ *
+ * 1) Most boards export one VGA + one HDMI output interface.
+ * 2) Some boards export two HDMI output interfaces.
+ * 3) A few boards export three outputs (2 HDMI + 1 VGA).
+ *
+ * So hook the HDMI helper funcs up on every display pipe; writing the HDMI
+ * registers does no harm even on a pipe without an HDMI port.
+ */
+int ls7a2000_output_init(struct drm_device *ddev,
+ struct lsdc_display_pipe *dispipe,
+ struct i2c_adapter *ddc,
+ unsigned int pipe)
+{
+ struct lsdc_output *output = &dispipe->output;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_connector *connector = &output->connector;
+ int ret;
+
+ ret = drm_encoder_init(ddev, encoder, &ls7a2000_encoder_funcs[pipe],
+ DRM_MODE_ENCODER_TMDS, "encoder-%u", pipe);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = BIT(pipe);
+
+ drm_encoder_helper_add(encoder, &ls7a2000_encoder_helper_funcs);
+
+ ret = drm_connector_init_with_ddc(ddev, connector,
+ &ls7a2000_hdmi_connector_funcs[pipe],
+ DRM_MODE_CONNECTOR_HDMIA, ddc);
+ if (ret)
+ return ret;
+
+ drm_info(ddev, "display pipe-%u has HDMI %s\n", pipe, pipe ? "" : "and/or VGA");
+
+ drm_connector_helper_add(connector, &ls7a2000_connector_helpers);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c
new file mode 100644
index 000000000000..04c15b4697e2
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
+#include "lsdc_drv.h"
+
+/*
+ * The layout of the pixel PLL registers has evolved over time and can
+ * also differ across chips.
+ */
+
+/*
+ * The register is 64 bits wide; note that all Loongson CPUs are little
+ * endian. This structure is the same for the LS7A2000, LS7A1000 and
+ * LS2K2000.
+ */
+struct lsdc_pixpll_reg {
+ /* Byte 0 ~ Byte 3 */
+ unsigned div_out : 7; /* 6 : 0 Output clock divider */
+ unsigned _reserved_1_ : 14; /* 20 : 7 */
+ unsigned loopc : 9; /* 29 : 21 Clock multiplier */
+ unsigned _reserved_2_ : 2; /* 31 : 30 */
+
+ /* Byte 4 ~ Byte 7 */
+ unsigned div_ref : 7; /* 38 : 32 Input clock divider */
+ unsigned locked : 1; /* 39 PLL locked indicator */
+ unsigned sel_out : 1; /* 40 output clk selector */
+ unsigned _reserved_3_ : 2; /* 42 : 41 */
+ unsigned set_param : 1; /* 43 Trigger the update */
+ unsigned bypass : 1; /* 44 */
+ unsigned powerdown : 1; /* 45 */
+ unsigned _reserved_4_ : 18; /* 46 : 63 no use */
+};
+
+union lsdc_pixpll_reg_bitmap {
+ struct lsdc_pixpll_reg bitmap;
+ u32 w[2];
+ u64 d;
+};
+
+struct clk_to_pixpll_parms_lookup_t {
+ unsigned int clock; /* kHz */
+
+ unsigned short width;
+ unsigned short height;
+ unsigned short vrefresh;
+
+ /* Stores parameters for programming the Hardware PLLs */
+ unsigned short div_out;
+ unsigned short loopc;
+ unsigned short div_ref;
+};
+
+static const struct clk_to_pixpll_parms_lookup_t pixpll_parms_table[] = {
+ {148500, 1920, 1080, 60, 11, 49, 3}, /* 1920x1080@60Hz */
+ {141750, 1920, 1080, 60, 11, 78, 5}, /* 1920x1080@60Hz */
+ /* 1920x1080@50Hz */
+ {174500, 1920, 1080, 75, 17, 89, 3}, /* 1920x1080@75Hz */
+ {181250, 2560, 1080, 75, 8, 58, 4}, /* 2560x1080@75Hz */
+ {297000, 2560, 1080, 30, 8, 95, 4}, /* 3840x2160@30Hz */
+ {301992, 1920, 1080, 100, 10, 151, 5}, /* 1920x1080@100Hz */
+ {146250, 1680, 1050, 60, 16, 117, 5}, /* 1680x1050@60Hz */
+ {135000, 1280, 1024, 75, 10, 54, 4}, /* 1280x1024@75Hz */
+ {119000, 1680, 1050, 60, 20, 119, 5}, /* 1680x1050@60Hz */
+ {108000, 1600, 900, 60, 15, 81, 5}, /* 1600x900@60Hz */
+ /* 1280x1024@60Hz */
+ /* 1280x960@60Hz */
+ /* 1152x864@75Hz */
+
+ {106500, 1440, 900, 60, 19, 81, 4}, /* 1440x900@60Hz */
+ {88750, 1440, 900, 60, 16, 71, 5}, /* 1440x900@60Hz */
+ {83500, 1280, 800, 60, 17, 71, 5}, /* 1280x800@60Hz */
+ {71000, 1280, 800, 60, 20, 71, 5}, /* 1280x800@60Hz */
+
+ {74250, 1280, 720, 60, 22, 49, 3}, /* 1280x720@60Hz */
+ /* 1280x720@50Hz */
+
+ {78750, 1024, 768, 75, 16, 63, 5}, /* 1024x768@75Hz */
+ {75000, 1024, 768, 70, 29, 87, 4}, /* 1024x768@70Hz */
+ {65000, 1024, 768, 60, 20, 39, 3}, /* 1024x768@60Hz */
+
+ {51200, 1024, 600, 60, 25, 64, 5}, /* 1024x600@60Hz */
+
+ {57284, 832, 624, 75, 24, 55, 4}, /* 832x624@75Hz */
+ {49500, 800, 600, 75, 40, 99, 5}, /* 800x600@75Hz */
+ {50000, 800, 600, 72, 44, 88, 4}, /* 800x600@72Hz */
+ {40000, 800, 600, 60, 30, 36, 3}, /* 800x600@60Hz */
+ {36000, 800, 600, 56, 50, 72, 4}, /* 800x600@56Hz */
+ {31500, 640, 480, 75, 40, 63, 5}, /* 640x480@75Hz */
+ /* 640x480@73Hz */
+
+ {30240, 640, 480, 67, 62, 75, 4}, /* 640x480@67Hz */
+ {27000, 720, 576, 50, 50, 54, 4}, /* 720x576@60Hz */
+ {25175, 640, 480, 60, 85, 107, 5}, /* 640x480@60Hz */
+ {25200, 640, 480, 60, 50, 63, 5}, /* 640x480@60Hz */
+ /* 720x480@60Hz */
+};
+
+static void lsdc_pixel_pll_free(struct drm_device *ddev, void *data)
+{
+ struct lsdc_pixpll *this = (struct lsdc_pixpll *)data;
+
+ iounmap(this->mmio);
+
+ kfree(this->priv);
+
+ drm_dbg(ddev, "pixpll private data freed\n");
+}
+
+/*
+ * Map the device-dependent PLL registers and allocate parameter storage
+ *
+ * @this: the pixel PLL instance being set up
+ */
+static int lsdc_pixel_pll_setup(struct lsdc_pixpll * const this)
+{
+ struct lsdc_pixpll_parms *pparms;
+
+ this->mmio = ioremap(this->reg_base, this->reg_size);
+ if (IS_ERR_OR_NULL(this->mmio))
+ return -ENOMEM;
+
+ pparms = kzalloc(sizeof(*pparms), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(pparms))
+ return -ENOMEM;
+
+ pparms->ref_clock = LSDC_PLL_REF_CLK_KHZ;
+
+ this->priv = pparms;
+
+ return drmm_add_action_or_reset(this->ddev, lsdc_pixel_pll_free, this);
+}
+
+/*
+ * Look up a set of PLL parameters in a static local table, which avoids
+ * recomputing the PLL parameters each time a modeset is triggered.
+ *
+ * @this: the pixel PLL instance the lookup is done for
+ * @clock: the desired output pixel clock, in kHz
+ * @pout: where to store the parameters if found
+ *
+ * Return 0 on success, -1 if not found.
+ */
+static int lsdc_pixpll_find(struct lsdc_pixpll * const this,
+ unsigned int clock,
+ struct lsdc_pixpll_parms *pout)
+{
+ unsigned int num = ARRAY_SIZE(pixpll_parms_table);
+ const struct clk_to_pixpll_parms_lookup_t *pt;
+ unsigned int i;
+
+ for (i = 0; i < num; ++i) {
+ pt = &pixpll_parms_table[i];
+
+ if (clock == pt->clock) {
+ pout->div_ref = pt->div_ref;
+ pout->loopc = pt->loopc;
+ pout->div_out = pt->div_out;
+
+ return 0;
+ }
+ }
+
+ drm_dbg_kms(this->ddev, "pixel clock %u: miss\n", clock);
+
+ return -1;
+}
+
+/*
+ * Find the set of PLL parameters whose output has the minimal difference
+ * from the desired pixel clock frequency. It does that by iterating over
+ * all possible combinations, computing the difference for each and keeping
+ * the combination with the smallest one.
+ *
+ * clock_out = refclk / div_ref * loopc / div_out
+ *
+ * refclk is determined by the oscillator mounted on the motherboard
+ * (100 MHz on almost all boards)
+ *
+ * @this: pointer to the object from where this function is called
+ * @clock: the desired output pixel clock, in kHz
+ * @pout: pointer to the output struct lsdc_pixpll_parms
+ *
+ * Return 0 if a set of parameters is found, otherwise return the difference
+ * (in kHz) between the desired clock and the closest candidate.
+ */
+static int lsdc_pixel_pll_compute(struct lsdc_pixpll * const this,
+ unsigned int clock,
+ struct lsdc_pixpll_parms *pout)
+{
+ struct lsdc_pixpll_parms *pparms = this->priv;
+ unsigned int refclk = pparms->ref_clock;
+ const unsigned int tolerance = 1000;
+ unsigned int min = tolerance;
+ unsigned int div_out, loopc, div_ref;
+ unsigned int computed;
+
+ if (!lsdc_pixpll_find(this, clock, pout))
+ return 0;
+
+ for (div_out = 6; div_out < 64; div_out++) {
+ for (div_ref = 3; div_ref < 6; div_ref++) {
+ for (loopc = 6; loopc < 161; loopc++) {
+ unsigned int diff = 0;
+
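+ /*
+ * Keep the VCO in range: with the 100 MHz reference clock,
+ * loopc = 12 * div_ref yields a 1.2 GHz VCO and loopc = 32 * div_ref
+ * yields 3.2 GHz, matching the PLL working requirements.
+ */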
+ if (loopc < 12 * div_ref)
+ continue;
+ if (loopc > 32 * div_ref)
+ continue;
+
+ computed = refclk / div_ref * loopc / div_out;
+
+ if (clock >= computed)
+ diff = clock - computed;
+ else
+ diff = computed - clock;
+
+ if (diff < min) {
+ min = diff;
+ pparms->div_ref = div_ref;
+ pparms->div_out = div_out;
+ pparms->loopc = loopc;
+
+ if (diff == 0) {
+ *pout = *pparms;
+ return 0;
+ }
+ }
+ }
+ }
+ }
+
+ /* still acceptable */
+ if (min < tolerance) {
+ *pout = *pparms;
+ return 0;
+ }
+
+ drm_dbg(this->ddev, "can't find suitable params for %u khz\n", clock);
+
+ return min;
+}
+
+/* Pixel pll hardware related ops, per display pipe */
+
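+/*
+ * The pixel PLL register is 8 bytes wide (reg_size is set to 8 in
+ * lsdc_pixpll_init), so access it with a single 64-bit read/write where
+ * possible and fall back to two 32-bit accesses on 32-bit kernels.
+ */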
+static void __pixpll_rreg(struct lsdc_pixpll *this,
+ union lsdc_pixpll_reg_bitmap *dst)
+{
+#if defined(CONFIG_64BIT)
+ dst->d = readq(this->mmio);
+#else
+ dst->w[0] = readl(this->mmio);
+ dst->w[1] = readl(this->mmio + 4);
+#endif
+}
+
+static void __pixpll_wreg(struct lsdc_pixpll *this,
+ union lsdc_pixpll_reg_bitmap *src)
+{
+#if defined(CONFIG_64BIT)
+ writeq(src->d, this->mmio);
+#else
+ writel(src->w[0], this->mmio);
+ writel(src->w[1], this->mmio + 4);
+#endif
+}
+
+static void __pixpll_ops_powerup(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.powerdown = 0;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_powerdown(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.powerdown = 1;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_on(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.sel_out = 1;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_off(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.sel_out = 0;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_bypass(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.bypass = 1;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_unbypass(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.bypass = 0;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_untoggle_param(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.set_param = 0;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_set_param(struct lsdc_pixpll * const this,
+ struct lsdc_pixpll_parms const *p)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.div_ref = p->div_ref;
+ pixpll_reg.bitmap.loopc = p->loopc;
+ pixpll_reg.bitmap.div_out = p->div_out;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_toggle_param(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+
+ __pixpll_rreg(this, &pixpll_reg);
+
+ pixpll_reg.bitmap.set_param = 1;
+
+ __pixpll_wreg(this, &pixpll_reg);
+}
+
+static void __pixpll_ops_wait_locked(struct lsdc_pixpll * const this)
+{
+ union lsdc_pixpll_reg_bitmap pixpll_reg;
+ unsigned int counter = 0;
+
+ do {
+ __pixpll_rreg(this, &pixpll_reg);
+
+ if (pixpll_reg.bitmap.locked)
+ break;
+
+ ++counter;
+ } while (counter < 2000);
+
+ drm_dbg(this->ddev, "%u loop waited\n", counter);
+}
+
+/*
+ * Update the PLL parameters to the PLL hardware
+ *
+ * @this: pointer to the object from which this function is called
+ * @pin: pointer to the struct lsdc_pixpll_parms passed in
+ *
+ * Return 0 if successful.
+ */
+static int lsdc_pixpll_update(struct lsdc_pixpll * const this,
+ struct lsdc_pixpll_parms const *pin)
+{
+ __pixpll_ops_bypass(this);
+
+ __pixpll_ops_off(this);
+
+ __pixpll_ops_powerdown(this);
+
+ __pixpll_ops_toggle_param(this);
+
+ __pixpll_ops_set_param(this, pin);
+
+ __pixpll_ops_untoggle_param(this);
+
+ __pixpll_ops_powerup(this);
+
+ udelay(2);
+
+ __pixpll_ops_wait_locked(this);
+
+ __pixpll_ops_on(this);
+
+ __pixpll_ops_unbypass(this);
+
+ return 0;
+}
+
+static unsigned int lsdc_pixpll_get_freq(struct lsdc_pixpll * const this)
+{
+ struct lsdc_pixpll_parms *ppar = this->priv;
+ union lsdc_pixpll_reg_bitmap pix_pll_reg;
+ unsigned int freq;
+
+ __pixpll_rreg(this, &pix_pll_reg);
+
+ ppar->div_ref = pix_pll_reg.bitmap.div_ref;
+ ppar->loopc = pix_pll_reg.bitmap.loopc;
+ ppar->div_out = pix_pll_reg.bitmap.div_out;
+
+ freq = ppar->ref_clock / ppar->div_ref * ppar->loopc / ppar->div_out;
+
+ return freq;
+}
+
+static void lsdc_pixpll_print(struct lsdc_pixpll * const this,
+ struct drm_printer *p)
+{
+ struct lsdc_pixpll_parms *parms = this->priv;
+
+ drm_printf(p, "div_ref: %u, loopc: %u, div_out: %u\n",
+ parms->div_ref, parms->loopc, parms->div_out);
+}
+
+/*
+ * LS7A1000, LS7A2000 and LS2K2000 share the same pixel PLL setting register
+ * layout, so we take this as the default; create a new instance if a
+ * different model is introduced.
+ */
+static const struct lsdc_pixpll_funcs __pixpll_default_funcs = {
+ .setup = lsdc_pixel_pll_setup,
+ .compute = lsdc_pixel_pll_compute,
+ .update = lsdc_pixpll_update,
+ .get_rate = lsdc_pixpll_get_freq,
+ .print = lsdc_pixpll_print,
+};
+
+/* pixel pll initialization */
+
+int lsdc_pixpll_init(struct lsdc_pixpll * const this,
+ struct drm_device *ddev,
+ unsigned int index)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ const struct lsdc_desc *descp = ldev->descp;
+ const struct loongson_gfx_desc *gfx = to_loongson_gfx(descp);
+
+ this->ddev = ddev;
+ this->reg_size = 8;
+ this->reg_base = gfx->conf_reg_base + gfx->pixpll[index].reg_offset;
+ this->funcs = &__pixpll_default_funcs;
+
+ return this->funcs->setup(this);
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.h b/drivers/gpu/drm/loongson/lsdc_pixpll.h
new file mode 100644
index 000000000000..ec3486d90ab6
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_pixpll.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_PIXPLL_H__
+#define __LSDC_PIXPLL_H__
+
+#include <drm/drm_device.h>
+
+/*
+ * Loongson Pixel PLL hardware structure
+ *
+ * refclk: reference frequency, 100 MHz from external oscillator
+ * outclk: output frequency desired.
+ *
+ *
+ * L1 Fref Fvco L2
+ * refclk +-----------+ +------------------+ +---------+ outclk
+ * ---+---> | Prescaler | ---> | Clock Multiplier | ---> | divider | -------->
+ * | +-----------+ +------------------+ +---------+ ^
+ * | ^ ^ ^ |
+ * | | | | |
+ * | | | | |
+ * | div_ref loopc div_out |
+ * | |
+ * +---- bypass (bypass above software configurable clock if set) ----+
+ *
+ * outclk = refclk / div_ref * loopc / div_out;
+ *
+ * sel_out: PLL clock output selector(enable).
+ *
+ * If sel_out == 1, then enable output clock (turn On);
+ * If sel_out == 0, then disable output clock (turn Off);
+ *
+ * PLL working requirements:
+ *
+ * 1) 20 MHz <= refclk / div_ref <= 40 MHz
+ * 2) 1.2 GHz <= refclk / div_ref * loopc <= 3.2 GHz
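+ *
+ * For example, div_ref = 3, loopc = 49 and div_out = 11 give
+ * outclk = 100000 kHz / 3 * 49 / 11 ~= 148500 kHz (the 1920x1080@60Hz dot
+ * clock), with refclk / div_ref ~= 33.3 MHz and a VCO of ~1633 MHz,
+ * both within the ranges above.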
+ */
+
+struct lsdc_pixpll_parms {
+ unsigned int ref_clock;
+ unsigned int div_ref;
+ unsigned int loopc;
+ unsigned int div_out;
+};
+
+struct lsdc_pixpll;
+
+struct lsdc_pixpll_funcs {
+ int (*setup)(struct lsdc_pixpll * const this);
+
+ int (*compute)(struct lsdc_pixpll * const this,
+ unsigned int clock,
+ struct lsdc_pixpll_parms *pout);
+
+ int (*update)(struct lsdc_pixpll * const this,
+ struct lsdc_pixpll_parms const *pin);
+
+ unsigned int (*get_rate)(struct lsdc_pixpll * const this);
+
+ void (*print)(struct lsdc_pixpll * const this,
+ struct drm_printer *printer);
+};
+
+struct lsdc_pixpll {
+ const struct lsdc_pixpll_funcs *funcs;
+
+ struct drm_device *ddev;
+
+ /* PLL register offset */
+ u32 reg_base;
+ /* PLL register size in bytes */
+ u32 reg_size;
+
+ void __iomem *mmio;
+
+ struct lsdc_pixpll_parms *priv;
+};
+
+int lsdc_pixpll_init(struct lsdc_pixpll * const this,
+ struct drm_device *ddev,
+ unsigned int index);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
new file mode 100644
index 000000000000..0d5094633222
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_regs.h"
+#include "lsdc_ttm.h"
+
+static const u32 lsdc_primary_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 lsdc_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const u64 lsdc_fb_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static unsigned int lsdc_get_fb_offset(struct drm_framebuffer *fb,
+ struct drm_plane_state *state)
+{
+ unsigned int offset = fb->offsets[0];
+
+ offset += fb->format->cpp[0] * (state->src_x >> 16);
+ offset += fb->pitches[0] * (state->src_y >> 16);
+
+ return offset;
+}
+
+static u64 lsdc_fb_base_addr(struct drm_framebuffer *fb)
+{
+ struct lsdc_device *ldev = to_lsdc(fb->dev);
+ struct lsdc_bo *lbo = gem_to_lsdc_bo(fb->obj[0]);
+
+ return lsdc_bo_gpu_offset(lbo) + ldev->vram_base;
+}
+
+static int lsdc_primary_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, true);
+}
+
+static void lsdc_primary_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_primary *primary = to_lsdc_primary(plane);
+ const struct lsdc_primary_plane_ops *ops = primary->ops;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
+ u64 fb_addr = lsdc_fb_base_addr(new_fb);
+
+ fb_addr += lsdc_get_fb_offset(new_fb, new_plane_state);
+
+ ops->update_fb_addr(primary, fb_addr);
+ ops->update_fb_stride(primary, new_fb->pitches[0]);
+
+ if (!old_fb || old_fb->format != new_fb->format)
+ ops->update_fb_format(primary, new_fb->format);
+}
+
+static void lsdc_primary_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ /*
+ * Do nothing here; this just prevents a call into atomic_update().
+ * Writing the format as LSDC_PF_NONE could disable the primary plane,
+ * but that does not seem necessary...
+ */
+ drm_dbg(plane->dev, "%s disabled\n", plane->name);
+}
+
+static int lsdc_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct lsdc_bo *lbo;
+ u64 gpu_vaddr;
+ int ret;
+
+ if (!fb)
+ return 0;
+
+ lbo = gem_to_lsdc_bo(fb->obj[0]);
+
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret)) {
+ drm_err(plane->dev, "bo %p reserve failed\n", lbo);
+ return ret;
+ }
+
+ ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_VRAM, &gpu_vaddr);
+
+ lsdc_bo_unreserve(lbo);
+
+ if (unlikely(ret)) {
+ drm_err(plane->dev, "bo %p pin failed\n", lbo);
+ return ret;
+ }
+
+ lsdc_bo_ref(lbo);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ drm_dbg(plane->dev,
+ "%s[%p] pin at 0x%llx, bo size: %zu\n",
+ plane->name, lbo, gpu_vaddr, lsdc_bo_size(lbo));
+
+ return drm_gem_plane_helper_prepare_fb(plane, new_state);
+}
+
+static void lsdc_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_framebuffer *fb = old_state->fb;
+ struct lsdc_bo *lbo;
+ int ret;
+
+ if (!fb)
+ return;
+
+ lbo = gem_to_lsdc_bo(fb->obj[0]);
+
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret)) {
+ drm_err(plane->dev, "%p reserve failed\n", lbo);
+ return;
+ }
+
+ lsdc_bo_unpin(lbo);
+
+ lsdc_bo_unreserve(lbo);
+
+ lsdc_bo_unref(lbo);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ drm_dbg(plane->dev, "%s unpin\n", plane->name);
+}
+
+static const struct drm_plane_helper_funcs lsdc_primary_helper_funcs = {
+ .prepare_fb = lsdc_plane_prepare_fb,
+ .cleanup_fb = lsdc_plane_cleanup_fb,
+ .atomic_check = lsdc_primary_atomic_check,
+ .atomic_update = lsdc_primary_atomic_update,
+ .atomic_disable = lsdc_primary_atomic_disable,
+};
+
+static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!plane->state || !plane->state->fb) {
+ drm_dbg(plane->dev, "%s: state is NULL\n", plane->name);
+ return -EINVAL;
+ }
+
+ if (new_state->crtc_w != new_state->crtc_h) {
+ drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+
+ if (new_state->crtc_w != 64 && new_state->crtc_w != 32) {
+ drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ if (plane->state->crtc != new_state->crtc ||
+ plane->state->src_w != new_state->src_w ||
+ plane->state->src_h != new_state->src_h ||
+ plane->state->crtc_w != new_state->crtc_w ||
+ plane->state->crtc_h != new_state->crtc_h)
+ return -EINVAL;
+
+ if (new_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return drm_atomic_helper_check_plane_state(plane->state,
+ crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static void lsdc_cursor_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+ struct drm_framebuffer *old_fb = plane->state->fb;
+ struct drm_framebuffer *new_fb;
+ struct drm_plane_state *new_state;
+
+ new_state = drm_atomic_get_new_plane_state(state, plane);
+
+ new_fb = plane->state->fb;
+
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+ swap(plane->state->fb, new_state->fb);
+
+ if (new_state->visible) {
+ enum lsdc_cursor_size cursor_size;
+
+ switch (new_state->crtc_w) {
+ case 64:
+ cursor_size = CURSOR_SIZE_64X64;
+ break;
+ case 32:
+ cursor_size = CURSOR_SIZE_32X32;
+ break;
+ default:
+ cursor_size = CURSOR_SIZE_32X32;
+ break;
+ }
+
+ ops->update_position(cursor, new_state->crtc_x, new_state->crtc_y);
+
+ ops->update_cfg(cursor, cursor_size, CURSOR_FORMAT_ARGB8888);
+
+ if (!old_fb || old_fb != new_fb)
+ ops->update_bo_addr(cursor, lsdc_fb_base_addr(new_fb));
+ }
+}
+
+/* ls7a1000 cursor plane helpers */
+
+static int ls7a1000_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ crtc = new_plane_state->crtc;
+ if (!crtc) {
+ drm_dbg(plane->dev, "%s is not bound to a crtc\n", plane->name);
+ return 0;
+ }
+
+ if (new_plane_state->crtc_w != 32 || new_plane_state->crtc_h != 32) {
+ drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+static void ls7a1000_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
+ const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+ u64 addr = lsdc_fb_base_addr(new_fb);
+
+ if (!new_plane_state->visible)
+ return;
+
+ ops->update_position(cursor, new_plane_state->crtc_x, new_plane_state->crtc_y);
+
+ if (!old_fb || old_fb != new_fb)
+ ops->update_bo_addr(cursor, addr);
+
+ ops->update_cfg(cursor, CURSOR_SIZE_32X32, CURSOR_FORMAT_ARGB8888);
+}
+
+static void ls7a1000_cursor_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+
+ ops->update_cfg(cursor, CURSOR_SIZE_32X32, CURSOR_FORMAT_DISABLE);
+}
+
+static const struct drm_plane_helper_funcs ls7a1000_cursor_plane_helper_funcs = {
+ .prepare_fb = lsdc_plane_prepare_fb,
+ .cleanup_fb = lsdc_plane_cleanup_fb,
+ .atomic_check = ls7a1000_cursor_plane_atomic_check,
+ .atomic_update = ls7a1000_cursor_plane_atomic_update,
+ .atomic_disable = ls7a1000_cursor_plane_atomic_disable,
+ .atomic_async_check = lsdc_cursor_plane_atomic_async_check,
+ .atomic_async_update = lsdc_cursor_plane_atomic_async_update,
+};
+
+/* ls7a2000 cursor plane helpers */
+
+static int ls7a2000_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ crtc = new_plane_state->crtc;
+ if (!crtc) {
+ drm_dbg(plane->dev, "%s is not bound to a crtc\n", plane->name);
+ return 0;
+ }
+
+ if (new_plane_state->crtc_w != new_plane_state->crtc_h) {
+ drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
+ if (new_plane_state->crtc_w != 64 && new_plane_state->crtc_w != 32) {
+ drm_dbg(plane->dev, "unsupported cursor size: %ux%u\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, true);
+}
+
+/* Update the format, size and location of the cursor */
+
+static void ls7a2000_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
+ const struct lsdc_cursor_plane_ops *ops = cursor->ops;
+ enum lsdc_cursor_size cursor_size;
+
+ if (!new_plane_state->visible)
+ return;
+
+ ops->update_position(cursor, new_plane_state->crtc_x, new_plane_state->crtc_y);
+
+ if (!old_fb || new_fb != old_fb) {
+ u64 addr = lsdc_fb_base_addr(new_fb);
+
+ ops->update_bo_addr(cursor, addr);
+ }
+
+ switch (new_plane_state->crtc_w) {
+ case 64:
+ cursor_size = CURSOR_SIZE_64X64;
+ break;
+ case 32:
+ cursor_size = CURSOR_SIZE_32X32;
+ break;
+ default:
+ cursor_size = CURSOR_SIZE_64X64;
+ break;
+ }
+
+ ops->update_cfg(cursor, cursor_size, CURSOR_FORMAT_ARGB8888);
+}
+
+static void ls7a2000_cursor_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ const struct lsdc_cursor_plane_ops *hw_ops = cursor->ops;
+
+ hw_ops->update_cfg(cursor, CURSOR_SIZE_64X64, CURSOR_FORMAT_DISABLE);
+}
+
+static const struct drm_plane_helper_funcs ls7a2000_cursor_plane_helper_funcs = {
+ .prepare_fb = lsdc_plane_prepare_fb,
+ .cleanup_fb = lsdc_plane_cleanup_fb,
+ .atomic_check = ls7a2000_cursor_plane_atomic_check,
+ .atomic_update = ls7a2000_cursor_plane_atomic_update,
+ .atomic_disable = ls7a2000_cursor_plane_atomic_disable,
+ .atomic_async_check = lsdc_cursor_plane_atomic_async_check,
+ .atomic_async_update = lsdc_cursor_plane_atomic_async_update,
+};
+
+static void lsdc_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ u64 addr;
+
+ if (!fb)
+ return;
+
+ addr = lsdc_fb_base_addr(fb);
+
+ drm_printf(p, "\tdma addr=%llx\n", addr);
+}
+
+static const struct drm_plane_funcs lsdc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_print_state = lsdc_plane_atomic_print_state,
+};
+
+/* Primary plane 0 hardware related ops */
+
+static void lsdc_primary0_update_fb_addr(struct lsdc_primary *primary, u64 addr)
+{
+ struct lsdc_device *ldev = primary->ldev;
+ u32 status;
+ u32 lo, hi;
+
+ /* 40-bit width physical address bus */
+ lo = addr & 0xFFFFFFFF;
+ hi = (addr >> 32) & 0xFF;
+
+ status = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+ if (status & FB_REG_IN_USING) {
+ lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_LO_REG, lo);
+ lsdc_wreg32(ldev, LSDC_CRTC0_FB1_ADDR_HI_REG, hi);
+ } else {
+ lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_LO_REG, lo);
+ lsdc_wreg32(ldev, LSDC_CRTC0_FB0_ADDR_HI_REG, hi);
+ }
+}
+
+static void lsdc_primary0_update_fb_stride(struct lsdc_primary *primary, u32 stride)
+{
+ struct lsdc_device *ldev = primary->ldev;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_STRIDE_REG, stride);
+}
+
+static void lsdc_primary0_update_fb_format(struct lsdc_primary *primary,
+ const struct drm_format_info *format)
+{
+ struct lsdc_device *ldev = primary->ldev;
+ u32 status;
+
+ status = lsdc_rreg32(ldev, LSDC_CRTC0_CFG_REG);
+
+ /*
+ * TODO: add RGB565 support, only XRGB8888 is supported at present
+ */
+ status &= ~CFG_PIX_FMT_MASK;
+ status |= LSDC_PF_XRGB8888;
+
+ lsdc_wreg32(ldev, LSDC_CRTC0_CFG_REG, status);
+}
+
+/* Primary plane 1 hardware related ops */
+
+static void lsdc_primary1_update_fb_addr(struct lsdc_primary *primary, u64 addr)
+{
+ struct lsdc_device *ldev = primary->ldev;
+ u32 status;
+ u32 lo, hi;
+
+ /* 40-bit width physical address bus */
+ lo = addr & 0xFFFFFFFF;
+ hi = (addr >> 32) & 0xFF;
+
+ status = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+ if (status & FB_REG_IN_USING) {
+ lsdc_wreg32(ldev, LSDC_CRTC1_FB1_ADDR_LO_REG, lo);
+ lsdc_wreg32(ldev, LSDC_CRTC1_FB1_ADDR_HI_REG, hi);
+ } else {
+ lsdc_wreg32(ldev, LSDC_CRTC1_FB0_ADDR_LO_REG, lo);
+ lsdc_wreg32(ldev, LSDC_CRTC1_FB0_ADDR_HI_REG, hi);
+ }
+}
+
+static void lsdc_primary1_update_fb_stride(struct lsdc_primary *primary, u32 stride)
+{
+ struct lsdc_device *ldev = primary->ldev;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_STRIDE_REG, stride);
+}
+
+static void lsdc_primary1_update_fb_format(struct lsdc_primary *primary,
+ const struct drm_format_info *format)
+{
+ struct lsdc_device *ldev = primary->ldev;
+ u32 status;
+
+ status = lsdc_rreg32(ldev, LSDC_CRTC1_CFG_REG);
+
+ /*
+ * TODO: add RGB565 support, only XRGB8888 is supported at present
+ */
+ status &= ~CFG_PIX_FMT_MASK;
+ status |= LSDC_PF_XRGB8888;
+
+ lsdc_wreg32(ldev, LSDC_CRTC1_CFG_REG, status);
+}
+
+static const struct lsdc_primary_plane_ops lsdc_primary_plane_hw_ops[2] = {
+ {
+ .update_fb_addr = lsdc_primary0_update_fb_addr,
+ .update_fb_stride = lsdc_primary0_update_fb_stride,
+ .update_fb_format = lsdc_primary0_update_fb_format,
+ },
+ {
+ .update_fb_addr = lsdc_primary1_update_fb_addr,
+ .update_fb_stride = lsdc_primary1_update_fb_stride,
+ .update_fb_format = lsdc_primary1_update_fb_format,
+ },
+};
+
+/*
+ * Update the location, format, enable and disable state of the cursor.
+ * For chips that have two hardware cursors, cursor 0 is attached to CRTC-0
+ * and cursor 1 is attached to CRTC-1. Compositing the primary plane and the
+ * cursor plane is done automatically by the hardware; the cursor is always
+ * on top of the primary plane. In other words, the z-order is fixed in
+ * hardware and cannot be changed. For old DCs which have only one hardware
+ * cursor, we share it between the two screens; this works in extended
+ * screen mode.
+ */
+
+/* cursor plane 0 (for pipe 0) related hardware ops */
+
+static void lsdc_cursor0_update_bo_addr(struct lsdc_cursor *cursor, u64 addr)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+
+ /* 40-bit width physical address bus */
+ lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_HI_REG, (addr >> 32) & 0xFF);
+ lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_LO_REG, addr);
+}
+
+static void lsdc_cursor0_update_position(struct lsdc_cursor *cursor, int x, int y)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+
+ if (x < 0)
+ x = 0;
+
+ if (y < 0)
+ y = 0;
+
+ lsdc_wreg32(ldev, LSDC_CURSOR0_POSITION_REG, (y << 16) | x);
+}
+
+static void lsdc_cursor0_update_cfg(struct lsdc_cursor *cursor,
+ enum lsdc_cursor_size cursor_size,
+ enum lsdc_cursor_format fmt)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+ u32 cfg;
+
+ cfg = CURSOR_ON_CRTC0 << CURSOR_LOCATION_SHIFT |
+ cursor_size << CURSOR_SIZE_SHIFT |
+ fmt << CURSOR_FORMAT_SHIFT;
+
+ lsdc_wreg32(ldev, LSDC_CURSOR0_CFG_REG, cfg);
+}
+
+/* cursor plane 1 (for pipe 1) related hardware ops */
+
+static void lsdc_cursor1_update_bo_addr(struct lsdc_cursor *cursor, u64 addr)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+
+ /* 40-bit width physical address bus */
+ lsdc_wreg32(ldev, LSDC_CURSOR1_ADDR_HI_REG, (addr >> 32) & 0xFF);
+ lsdc_wreg32(ldev, LSDC_CURSOR1_ADDR_LO_REG, addr);
+}
+
+static void lsdc_cursor1_update_position(struct lsdc_cursor *cursor, int x, int y)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+
+ if (x < 0)
+ x = 0;
+
+ if (y < 0)
+ y = 0;
+
+ lsdc_wreg32(ldev, LSDC_CURSOR1_POSITION_REG, (y << 16) | x);
+}
+
+static void lsdc_cursor1_update_cfg(struct lsdc_cursor *cursor,
+ enum lsdc_cursor_size cursor_size,
+ enum lsdc_cursor_format fmt)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+ u32 cfg;
+
+ cfg = CURSOR_ON_CRTC1 << CURSOR_LOCATION_SHIFT |
+ cursor_size << CURSOR_SIZE_SHIFT |
+ fmt << CURSOR_FORMAT_SHIFT;
+
+ lsdc_wreg32(ldev, LSDC_CURSOR1_CFG_REG, cfg);
+}
+
+/* The hardware cursors work as expected since LS7A2000/LS2K2000, one per pipe */
+
+static const struct lsdc_cursor_plane_ops ls7a2000_cursor_hw_ops[2] = {
+ {
+ .update_bo_addr = lsdc_cursor0_update_bo_addr,
+ .update_cfg = lsdc_cursor0_update_cfg,
+ .update_position = lsdc_cursor0_update_position,
+ },
+ {
+ .update_bo_addr = lsdc_cursor1_update_bo_addr,
+ .update_cfg = lsdc_cursor1_update_cfg,
+ .update_position = lsdc_cursor1_update_position,
+ },
+};
+
+/* Quirks for cursor 1, only for the old Loongson display controllers */
+
+static void lsdc_cursor1_update_bo_addr_quirk(struct lsdc_cursor *cursor, u64 addr)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+
+ /* 40-bit width physical address bus */
+ lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_HI_REG, (addr >> 32) & 0xFF);
+ lsdc_wreg32(ldev, LSDC_CURSOR0_ADDR_LO_REG, addr);
+}
+
+static void lsdc_cursor1_update_position_quirk(struct lsdc_cursor *cursor, int x, int y)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+
+ if (x < 0)
+ x = 0;
+
+ if (y < 0)
+ y = 0;
+
+ lsdc_wreg32(ldev, LSDC_CURSOR0_POSITION_REG, (y << 16) | x);
+}
+
+static void lsdc_cursor1_update_cfg_quirk(struct lsdc_cursor *cursor,
+ enum lsdc_cursor_size cursor_size,
+ enum lsdc_cursor_format fmt)
+{
+ struct lsdc_device *ldev = cursor->ldev;
+ u32 cfg;
+
+ cfg = CURSOR_ON_CRTC1 << CURSOR_LOCATION_SHIFT |
+ cursor_size << CURSOR_SIZE_SHIFT |
+ fmt << CURSOR_FORMAT_SHIFT;
+
+ lsdc_wreg32(ldev, LSDC_CURSOR0_CFG_REG, cfg);
+}
+
+/*
+ * The unforgiving LS7A1000/LS2K1000 has only one hardware cursor plane
+ */
+static const struct lsdc_cursor_plane_ops ls7a1000_cursor_hw_ops[2] = {
+ {
+ .update_bo_addr = lsdc_cursor0_update_bo_addr,
+ .update_cfg = lsdc_cursor0_update_cfg,
+ .update_position = lsdc_cursor0_update_position,
+ },
+ {
+ .update_bo_addr = lsdc_cursor1_update_bo_addr_quirk,
+ .update_cfg = lsdc_cursor1_update_cfg_quirk,
+ .update_position = lsdc_cursor1_update_position_quirk,
+ },
+};
+
+int lsdc_primary_plane_init(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index)
+{
+ struct lsdc_primary *primary = to_lsdc_primary(plane);
+ int ret;
+
+ ret = drm_universal_plane_init(ddev, plane, 1 << index,
+ &lsdc_plane_funcs,
+ lsdc_primary_formats,
+ ARRAY_SIZE(lsdc_primary_formats),
+ lsdc_fb_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "ls-primary-plane-%u", index);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &lsdc_primary_helper_funcs);
+
+ primary->ldev = to_lsdc(ddev);
+ primary->ops = &lsdc_primary_plane_hw_ops[index];
+
+ return 0;
+}
+
+int ls7a1000_cursor_plane_init(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ int ret;
+
+ ret = drm_universal_plane_init(ddev, plane, 1 << index,
+ &lsdc_plane_funcs,
+ lsdc_cursor_formats,
+ ARRAY_SIZE(lsdc_cursor_formats),
+ lsdc_fb_format_modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "ls-cursor-plane-%u", index);
+ if (ret)
+ return ret;
+
+ cursor->ldev = to_lsdc(ddev);
+ cursor->ops = &ls7a1000_cursor_hw_ops[index];
+
+ drm_plane_helper_add(plane, &ls7a1000_cursor_plane_helper_funcs);
+
+ return 0;
+}
+
+int ls7a2000_cursor_plane_init(struct drm_device *ddev,
+ struct drm_plane *plane,
+ unsigned int index)
+{
+ struct lsdc_cursor *cursor = to_lsdc_cursor(plane);
+ int ret;
+
+ ret = drm_universal_plane_init(ddev, plane, 1 << index,
+ &lsdc_plane_funcs,
+ lsdc_cursor_formats,
+ ARRAY_SIZE(lsdc_cursor_formats),
+ lsdc_fb_format_modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "ls-cursor-plane-%u", index);
+ if (ret)
+ return ret;
+
+ cursor->ldev = to_lsdc(ddev);
+ cursor->ops = &ls7a2000_cursor_hw_ops[index];
+
+ drm_plane_helper_add(plane, &ls7a2000_cursor_plane_helper_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_probe.c b/drivers/gpu/drm/loongson/lsdc_probe.c
new file mode 100644
index 000000000000..48ba69bb8a98
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_probe.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include "lsdc_drv.h"
+#include "lsdc_probe.h"
+
+/*
+ * Processor ID (implementation) values for bits 15:8 of the PRID register.
+ */
+#define LOONGSON_CPU_IMP_MASK 0xff00
+#define LOONGSON_CPU_IMP_SHIFT 8
+
+#define LOONGARCH_CPU_IMP_LS2K1000 0xa0
+#define LOONGARCH_CPU_IMP_LS2K2000 0xb0
+#define LOONGARCH_CPU_IMP_LS3A5000 0xc0
+
+#define LOONGSON_CPU_MIPS_IMP_LS2K 0x61 /* Loongson 2K Mips series SoC */
+
+/*
+ * Particular Revision values for bits 7:0 of the PRID register.
+ */
+#define LOONGSON_CPU_REV_MASK 0x00ff
+
+#define LOONGARCH_CPUCFG_PRID_REG 0x0
+
+/*
+ * With this information about the host CPU, we can achieve fine-grained control.
+ */
+
+unsigned int loongson_cpu_get_prid(u8 *imp, u8 *rev)
+{
+ unsigned int prid = 0;
+
+#if defined(__loongarch__)
+ __asm__ volatile("cpucfg %0, %1\n\t"
+ : "=&r"(prid)
+ : "r"(LOONGARCH_CPUCFG_PRID_REG)
+ );
+#endif
+
+#if defined(__mips__)
+ __asm__ volatile("mfc0\t%0, $15\n\t"
+ : "=r" (prid)
+ );
+#endif
+
+ if (imp)
+ *imp = (prid & LOONGSON_CPU_IMP_MASK) >> LOONGSON_CPU_IMP_SHIFT;
+
+ if (rev)
+ *rev = prid & LOONGSON_CPU_REV_MASK;
+
+ return prid;
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_probe.h b/drivers/gpu/drm/loongson/lsdc_probe.h
new file mode 100644
index 000000000000..8bb6de2e3c64
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_probe.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_PROBE_H__
+#define __LSDC_PROBE_H__
+
+/* Helpers for chip detection */
+unsigned int loongson_cpu_get_prid(u8 *impl, u8 *rev);
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_regs.h b/drivers/gpu/drm/loongson/lsdc_regs.h
new file mode 100644
index 000000000000..e8ea28689c63
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_regs.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_REGS_H__
+#define __LSDC_REGS_H__
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * PIXEL PLL Reference clock
+ */
+#define LSDC_PLL_REF_CLK_KHZ 100000
+
+/*
+ * Those PLL registers are relative to LSxxxxx_CFG_REG_BASE. xxxxx = 7A1000,
+ * 7A2000, 2K2000, 2K1000 etc.
+ */
+
+/* LS7A1000 */
+
+#define LS7A1000_PIXPLL0_REG 0x04B0
+#define LS7A1000_PIXPLL1_REG 0x04C0
+
+/* The DC, GPU, Graphic Memory Controller share the single gfxpll */
+#define LS7A1000_PLL_GFX_REG 0x0490
+
+#define LS7A1000_CONF_REG_BASE 0x10010000
+
+/* LS7A2000 */
+
+#define LS7A2000_PIXPLL0_REG 0x04B0
+#define LS7A2000_PIXPLL1_REG 0x04C0
+
+/* The DC, GPU, Graphic Memory Controller share the single gfxpll */
+#define LS7A2000_PLL_GFX_REG 0x0490
+
+#define LS7A2000_CONF_REG_BASE 0x10010000
+
+/* For LSDC_CRTCx_CFG_REG */
+#define CFG_PIX_FMT_MASK GENMASK(2, 0)
+
+enum lsdc_pixel_format {
+ LSDC_PF_NONE = 0,
+ LSDC_PF_XRGB444 = 1, /* [12 bits] */
+ LSDC_PF_XRGB555 = 2, /* [15 bits] */
+ LSDC_PF_XRGB565 = 3, /* RGB [16 bits] */
+ LSDC_PF_XRGB8888 = 4, /* XRGB [32 bits] */
+};
+
+/*
+ * Each CRTC has two sets of usable framebuffer address registers. The
+ * FB_REG_IN_USING bit of LSDC_CRTCx_CFG_REG indicates which framebuffer
+ * address register is currently in use by the CRTC. CFG_PAGE_FLIP is used
+ * to trigger the switch; the switch will be finished at the very next
+ * vblank. Trigger it again if you want to switch back.
+ *
+ * If FB0_ADDR_REG is in use, we write the address to FB0_ADDR_REG;
+ * if FB1_ADDR_REG is in use, we write the address to FB1_ADDR_REG.
+ */
+#define CFG_PAGE_FLIP BIT(7)
+#define CFG_OUTPUT_ENABLE BIT(8)
+#define CFG_HW_CLONE BIT(9)
+/* Indicates which fb addr reg is currently in use, read only */
+#define FB_REG_IN_USING BIT(11)
+#define CFG_GAMMA_EN BIT(12)
+
+/* The DC gets a soft reset if this bit changes from "1" to "0", active low */
+#define CFG_RESET_N BIT(20)
+/* If this bit is set, the CRTC has stopped working (anchored). */
+#define CRTC_ANCHORED BIT(24)
+
+/*
+ * The DMA step of the DC in LS7A2000/LS2K2000 is configurable;
+ * setting those bits has no effect on the LS7A1000 platform.
+ */
+#define CFG_DMA_STEP_MASK GENMASK(17, 16)
+#define CFG_DMA_STEP_SHIFT 16
+enum lsdc_dma_steps {
+ LSDC_DMA_STEP_256_BYTES = 0,
+ LSDC_DMA_STEP_128_BYTES = 1,
+ LSDC_DMA_STEP_64_BYTES = 2,
+ LSDC_DMA_STEP_32_BYTES = 3,
+};
+
+#define CFG_VALID_BITS_MASK GENMASK(20, 0)
+
+/* For LSDC_CRTCx_HSYNC_REG */
+#define HSYNC_INV BIT(31)
+#define HSYNC_EN BIT(30)
+#define HSYNC_END_MASK GENMASK(28, 16)
+#define HSYNC_END_SHIFT 16
+#define HSYNC_START_MASK GENMASK(12, 0)
+#define HSYNC_START_SHIFT 0
+
+/* For LSDC_CRTCx_VSYNC_REG */
+#define VSYNC_INV BIT(31)
+#define VSYNC_EN BIT(30)
+#define VSYNC_END_MASK GENMASK(27, 16)
+#define VSYNC_END_SHIFT 16
+#define VSYNC_START_MASK GENMASK(11, 0)
+#define VSYNC_START_SHIFT 0
+
+/*********** CRTC0 ***********/
+#define LSDC_CRTC0_CFG_REG 0x1240
+#define LSDC_CRTC0_FB0_ADDR_LO_REG 0x1260
+#define LSDC_CRTC0_FB0_ADDR_HI_REG 0x15A0
+#define LSDC_CRTC0_STRIDE_REG 0x1280
+#define LSDC_CRTC0_FB_ORIGIN_REG 0x1300
+#define LSDC_CRTC0_HDISPLAY_REG 0x1400
+#define LSDC_CRTC0_HSYNC_REG 0x1420
+#define LSDC_CRTC0_VDISPLAY_REG 0x1480
+#define LSDC_CRTC0_VSYNC_REG 0x14A0
+#define LSDC_CRTC0_GAMMA_INDEX_REG 0x14E0
+#define LSDC_CRTC0_GAMMA_DATA_REG 0x1500
+#define LSDC_CRTC0_FB1_ADDR_LO_REG 0x1580
+#define LSDC_CRTC0_FB1_ADDR_HI_REG 0x15C0
+
+/*********** CRTC1 ***********/
+#define LSDC_CRTC1_CFG_REG 0x1250
+#define LSDC_CRTC1_FB0_ADDR_LO_REG 0x1270
+#define LSDC_CRTC1_FB0_ADDR_HI_REG 0x15B0
+#define LSDC_CRTC1_STRIDE_REG 0x1290
+#define LSDC_CRTC1_FB_ORIGIN_REG 0x1310
+#define LSDC_CRTC1_HDISPLAY_REG 0x1410
+#define LSDC_CRTC1_HSYNC_REG 0x1430
+#define LSDC_CRTC1_VDISPLAY_REG 0x1490
+#define LSDC_CRTC1_VSYNC_REG 0x14B0
+#define LSDC_CRTC1_GAMMA_INDEX_REG 0x14F0
+#define LSDC_CRTC1_GAMMA_DATA_REG 0x1510
+#define LSDC_CRTC1_FB1_ADDR_LO_REG 0x1590
+#define LSDC_CRTC1_FB1_ADDR_HI_REG 0x15D0
+
+/* For LSDC_CRTCx_DVO_CONF_REG */
+#define PHY_CLOCK_POL BIT(9)
+#define PHY_CLOCK_EN BIT(8)
+#define PHY_DE_POL BIT(1)
+#define PHY_DATA_EN BIT(0)
+
+/*********** DVO0 ***********/
+#define LSDC_CRTC0_DVO_CONF_REG 0x13C0
+
+/*********** DVO1 ***********/
+#define LSDC_CRTC1_DVO_CONF_REG 0x13D0
+
+/*
+ * All of the DC variants have hardware that records the scan position of
+ * the CRTC, [31:16] : current X position, [15:0] : current Y position
+ */
+#define LSDC_CRTC0_SCAN_POS_REG 0x14C0
+#define LSDC_CRTC1_SCAN_POS_REG 0x14D0
+
+/*
+ * LS7A2000 has a sync deviation register.
+ */
+#define SYNC_DEVIATION_EN BIT(31)
+#define SYNC_DEVIATION_NUM GENMASK(12, 0)
+#define LSDC_CRTC0_SYNC_DEVIATION_REG 0x1B80
+#define LSDC_CRTC1_SYNC_DEVIATION_REG 0x1B90
+
+/*
+ * In general, LSDC_CRTC1_XXX_REG - LSDC_CRTC0_XXX_REG = 0x10, but not all of
+ * the registers obey this rule; LSDC_CURSORx_XXX_REG just doesn't honor it.
+ * This is the root cause why we can't untangle the code simply by
+ * manipulating the offset of the register access. Our hardware engineers
+ * lacked experience when they designed this...
+ */
+#define CRTC_PIPE_OFFSET 0x10
+
+/*
+ * There is only one hardware cursor unit in LS7A1000 and LS2K1000; setting
+ * the CFG_HW_CLONE_EN bit to "1" can eliminate this embarrassment, which we
+ * rely on for the custom clone mode application. LS7A2000 has two hardware
+ * cursor units, which is good enough.
+ */
+#define CURSOR_FORMAT_MASK GENMASK(1, 0)
+#define CURSOR_FORMAT_SHIFT 0
+enum lsdc_cursor_format {
+ CURSOR_FORMAT_DISABLE = 0,
+ CURSOR_FORMAT_MONOCHROME = 1, /* masked */
+ CURSOR_FORMAT_ARGB8888 = 2, /* A8R8G8B8 */
+};
+
+/*
+ * LS7A1000 and LS2K1000 only support 32x32; LS2K2000 and LS7A2000 support
+ * 64x64, but it seems that setting this bit does no harm on LS7A1000, it
+ * just takes no effect.
+ */
+#define CURSOR_SIZE_SHIFT 2
+enum lsdc_cursor_size {
+ CURSOR_SIZE_32X32 = 0,
+ CURSOR_SIZE_64X64 = 1,
+};
+
+#define CURSOR_LOCATION_SHIFT 4
+enum lsdc_cursor_location {
+ CURSOR_ON_CRTC0 = 0,
+ CURSOR_ON_CRTC1 = 1,
+};
+
+#define LSDC_CURSOR0_CFG_REG 0x1520
+#define LSDC_CURSOR0_ADDR_LO_REG 0x1530
+#define LSDC_CURSOR0_ADDR_HI_REG 0x15e0
+#define LSDC_CURSOR0_POSITION_REG 0x1540 /* [31:16] Y, [15:0] X */
+#define LSDC_CURSOR0_BG_COLOR_REG 0x1550 /* background color */
+#define LSDC_CURSOR0_FG_COLOR_REG 0x1560 /* foreground color */
+
+#define LSDC_CURSOR1_CFG_REG 0x1670
+#define LSDC_CURSOR1_ADDR_LO_REG 0x1680
+#define LSDC_CURSOR1_ADDR_HI_REG 0x16e0
+#define LSDC_CURSOR1_POSITION_REG 0x1690 /* [31:16] Y, [15:0] X */
+#define LSDC_CURSOR1_BG_COLOR_REG 0x16A0 /* background color */
+#define LSDC_CURSOR1_FG_COLOR_REG 0x16B0 /* foreground color */
+
+/*
+ * DC Interrupt Control Register, 32bit, Address Offset: 1570
+ *
+ * Bits 15:0 indicate the interrupt status
+ * Bits 31:16 control whether the interrupts corresponding to bits 15:0 are
+ * enabled: write 1 to enable, write 0 to disable
+ *
+ * RF: Read Finished
+ * IDBU: Internal Data Buffer Underflow
+ * IDBFU: Internal Data Buffer Fatal Underflow
+ * CBRF: Cursor Buffer Read Finished Flag, not used.
+ * FBRF0: CRTC-0 reading from its framebuffer finished.
+ * FBRF1: CRTC-1 reading from its framebuffer finished.
+ *
+ * +-------+--------------------------+-------+--------+--------+-------+
+ * | 31:27 | 26:16 | 15:11 | 10 | 9 | 8 |
+ * +-------+--------------------------+-------+--------+--------+-------+
+ * | N/A | Interrupt Enable Control | N/A | IDBFU0 | IDBFU1 | IDBU0 |
+ * +-------+--------------------------+-------+--------+--------+-------+
+ *
+ * +-------+-------+-------+------+--------+--------+--------+--------+
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * +-------+-------+-------+------+--------+--------+--------+--------+
+ * | IDBU1 | FBRF0 | FBRF1 | CRRF | HSYNC0 | VSYNC0 | HSYNC1 | VSYNC1 |
+ * +-------+-------+-------+------+--------+--------+--------+--------+
+ *
+ * Unfortunately, CRTC0's interrupts are mixed with CRTC1's interrupts in
+ * one register again.
+ */
+
+#define LSDC_INT_REG 0x1570
+
+#define INT_CRTC0_VSYNC BIT(2)
+#define INT_CRTC0_HSYNC BIT(3)
+#define INT_CRTC0_RF BIT(6)
+#define INT_CRTC0_IDBU BIT(8)
+#define INT_CRTC0_IDBFU BIT(10)
+
+#define INT_CRTC1_VSYNC BIT(0)
+#define INT_CRTC1_HSYNC BIT(1)
+#define INT_CRTC1_RF BIT(5)
+#define INT_CRTC1_IDBU BIT(7)
+#define INT_CRTC1_IDBFU BIT(9)
+
+#define INT_CRTC0_VSYNC_EN BIT(18)
+#define INT_CRTC0_HSYNC_EN BIT(19)
+#define INT_CRTC0_RF_EN BIT(22)
+#define INT_CRTC0_IDBU_EN BIT(24)
+#define INT_CRTC0_IDBFU_EN BIT(26)
+
+#define INT_CRTC1_VSYNC_EN BIT(16)
+#define INT_CRTC1_HSYNC_EN BIT(17)
+#define INT_CRTC1_RF_EN BIT(21)
+#define INT_CRTC1_IDBU_EN BIT(23)
+#define INT_CRTC1_IDBFU_EN BIT(25)
+
+#define INT_STATUS_MASK GENMASK(15, 0)
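+
+/*
+ * Each interrupt enable bit is the matching status bit shifted left by 16,
+ * e.g. INT_CRTC0_VSYNC_EN (bit 18) enables INT_CRTC0_VSYNC (bit 2).
+ */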
+
+/*
+ * LS7A1000/LS7A2000 have 4 GPIOs which are used to emulate I2C. They are
+ * under the control of the LS7A_DC_GPIO_DAT_REG and LS7A_DC_GPIO_DIR_REG
+ * registers. Those GPIOs have no relationship with the GPIO hardware on the
+ * bridge chip itself. The offsets are relative to the DC register base
+ * address.
+ *
+ * LS2K1000 doesn't have those registers; it uses hardware I2C or generic
+ * GPIO-emulated I2C from the Linux I2C subsystem.
+ *
+ * GPIO data register, address offset: 0x1650
+ * +---------------+-----------+-----------+
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * +---------------+-----------+-----------+
+ * | | DVO1 | DVO0 |
+ * + N/A +-----------+-----------+
+ * | | SCL | SDA | SCL | SDA |
+ * +---------------+-----------+-----------+
+ */
+#define LS7A_DC_GPIO_DAT_REG 0x1650
+
+/*
+ * GPIO Input/Output direction control register, address offset: 0x1660
+ */
+#define LS7A_DC_GPIO_DIR_REG 0x1660
+
+/*
+ * LS7A2000 has two built-in HDMI encoders and one VGA encoder
+ */
+
+/*
+ * Number of continuous packets that may be present in the HDMI hblank and
+ * vblank zone, should be >= 48
+ */
+#define LSDC_HDMI0_ZONE_REG 0x1700
+#define LSDC_HDMI1_ZONE_REG 0x1710
+
+#define HDMI_H_ZONE_IDLE_SHIFT 0
+#define HDMI_V_ZONE_IDLE_SHIFT 16
+
+/* HDMI Interface Control Reg */
+#define HDMI_INTERFACE_EN BIT(0)
+#define HDMI_PACKET_EN BIT(1)
+#define HDMI_AUDIO_EN BIT(2)
+/*
+ * Preamble:
+ * Immediately preceding each video data period or data island period is the
+ * preamble. This is a sequence of eight identical control characters that
+ * indicate whether the upcoming data period is a video data period or is a
+ * data island. The values of CTL0, CTL1, CTL2, and CTL3 indicate the type of
+ * data period that follows.
+ */
+#define HDMI_VIDEO_PREAMBLE_MASK GENMASK(7, 4)
+#define HDMI_VIDEO_PREAMBLE_SHIFT 4
+/* 1: hw i2c, 0: gpio emulated i2c; shouldn't be put in LSDC_HDMIx_INTF_CTRL_REG */
+#define HW_I2C_EN BIT(8)
+#define HDMI_CTL_PERIOD_MODE BIT(9)
+#define LSDC_HDMI0_INTF_CTRL_REG 0x1720
+#define LSDC_HDMI1_INTF_CTRL_REG 0x1730
+
+#define HDMI_PHY_EN BIT(0)
+#define HDMI_PHY_RESET_N BIT(1)
+#define HDMI_PHY_TERM_L_EN BIT(8)
+#define HDMI_PHY_TERM_H_EN BIT(9)
+#define HDMI_PHY_TERM_DET_EN BIT(10)
+#define HDMI_PHY_TERM_STATUS BIT(11)
+#define LSDC_HDMI0_PHY_CTRL_REG 0x1800
+#define LSDC_HDMI1_PHY_CTRL_REG 0x1810
+
+/* High level duration needs to be > 1 us */
+#define HDMI_PLL_ENABLE BIT(0)
+#define HDMI_PLL_LOCKED BIT(16)
+/* Bypass the software-configured values, using a default source instead */
+#define HDMI_PLL_BYPASS BIT(17)
+
+#define HDMI_PLL_IDF_SHIFT 1
+#define HDMI_PLL_IDF_MASK GENMASK(5, 1)
+#define HDMI_PLL_LF_SHIFT 6
+#define HDMI_PLL_LF_MASK GENMASK(12, 6)
+#define HDMI_PLL_ODF_SHIFT 13
+#define HDMI_PLL_ODF_MASK GENMASK(15, 13)
+#define LSDC_HDMI0_PHY_PLL_REG 0x1820
+#define LSDC_HDMI1_PHY_PLL_REG 0x1830
+
+/* LS7A2000/LS2K2000 have an HPD status register; the status of the two
+ * HDMI ports is located in one register again.
+ */
+#define LSDC_HDMI_HPD_STATUS_REG 0x1BA0
+#define HDMI0_HPD_FLAG BIT(0)
+#define HDMI1_HPD_FLAG BIT(1)
+
+#define LSDC_HDMI0_PHY_CAL_REG 0x18C0
+#define LSDC_HDMI1_PHY_CAL_REG 0x18D0
+
+/* AVI InfoFrame */
+#define LSDC_HDMI0_AVI_CONTENT0 0x18E0
+#define LSDC_HDMI1_AVI_CONTENT0 0x18D0
+#define LSDC_HDMI0_AVI_CONTENT1 0x1900
+#define LSDC_HDMI1_AVI_CONTENT1 0x1910
+#define LSDC_HDMI0_AVI_CONTENT2 0x1920
+#define LSDC_HDMI1_AVI_CONTENT2 0x1930
+#define LSDC_HDMI0_AVI_CONTENT3 0x1940
+#define LSDC_HDMI1_AVI_CONTENT3 0x1950
+
+/* 1: enable avi infoframe packet, 0: disable avi infoframe packet */
+#define AVI_PKT_ENABLE BIT(0)
+/* 1: send one every two frames, 0: send one each frame */
+#define AVI_PKT_SEND_FREQ BIT(1)
+/*
+ * 1: write 1 to flush the avi reg content0 ~ content3 into the packet to be
+ * sent; the hardware will clear this bit automatically.
+ */
+#define AVI_PKT_UPDATE BIT(2)
+
+#define LSDC_HDMI0_AVI_INFO_CRTL_REG 0x1960
+#define LSDC_HDMI1_AVI_INFO_CRTL_REG 0x1970
+
+/*
+ * LS7A2000 has hardware that counts the number of vblanks generated
+ */
+#define LSDC_CRTC0_VSYNC_COUNTER_REG 0x1A00
+#define LSDC_CRTC1_VSYNC_COUNTER_REG 0x1A10
+
+/*
+ * LS7A2000 has audio hardware associated with the HDMI encoder.
+ */
+#define LSDC_HDMI0_AUDIO_PLL_LO_REG 0x1A20
+#define LSDC_HDMI1_AUDIO_PLL_LO_REG 0x1A30
+
+#define LSDC_HDMI0_AUDIO_PLL_HI_REG 0x1A40
+#define LSDC_HDMI1_AUDIO_PLL_HI_REG 0x1A50
+
+#endif
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
new file mode 100644
index 000000000000..bf79dc55afa4
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_prime.h>
+
+#include "lsdc_drv.h"
+#include "lsdc_ttm.h"
+
+const char *lsdc_mem_type_to_str(uint32_t mem_type)
+{
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ return "VRAM";
+ case TTM_PL_TT:
+ return "GTT";
+ case TTM_PL_SYSTEM:
+ return "SYSTEM";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+const char *lsdc_domain_to_str(u32 domain)
+{
+ switch (domain) {
+ case LSDC_GEM_DOMAIN_VRAM:
+ return "VRAM";
+ case LSDC_GEM_DOMAIN_GTT:
+ return "GTT";
+ case LSDC_GEM_DOMAIN_SYSTEM:
+ return "SYSTEM";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static void lsdc_bo_set_placement(struct lsdc_bo *lbo, u32 domain)
+{
+ u32 c = 0;
+ u32 pflags = 0;
+ u32 i;
+
+ if (lbo->tbo.base.size <= PAGE_SIZE)
+ pflags |= TTM_PL_FLAG_TOPDOWN;
+
+ lbo->placement.placement = lbo->placements;
+ lbo->placement.busy_placement = lbo->placements;
+
+ if (domain & LSDC_GEM_DOMAIN_VRAM) {
+ lbo->placements[c].mem_type = TTM_PL_VRAM;
+ lbo->placements[c++].flags = pflags;
+ }
+
+ if (domain & LSDC_GEM_DOMAIN_GTT) {
+ lbo->placements[c].mem_type = TTM_PL_TT;
+ lbo->placements[c++].flags = pflags;
+ }
+
+ if (domain & LSDC_GEM_DOMAIN_SYSTEM) {
+ lbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ lbo->placements[c++].flags = 0;
+ }
+
+ if (!c) {
+ lbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ lbo->placements[c++].flags = 0;
+ }
+
+ lbo->placement.num_placement = c;
+ lbo->placement.num_busy_placement = c;
+
+ for (i = 0; i < c; ++i) {
+ lbo->placements[i].fpfn = 0;
+ lbo->placements[i].lpfn = 0;
+ }
+}
+
+static void lsdc_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_tt *
+lsdc_ttm_tt_create(struct ttm_buffer_object *tbo, uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
+ if (ret < 0) {
+ kfree(tt);
+ return NULL;
+ }
+
+ return tt;
+}
+
+static int lsdc_ttm_tt_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
+{
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+ if (slave && ttm->sg) {
+ drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
+
+ return 0;
+ }
+
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+}
+
+static void lsdc_ttm_tt_unpopulate(struct ttm_device *bdev,
+ struct ttm_tt *ttm)
+{
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+ if (slave)
+ return;
+
+ return ttm_pool_free(&bdev->pool, ttm);
+}
+
+static void lsdc_bo_evict_flags(struct ttm_buffer_object *tbo,
+ struct ttm_placement *tplacement)
+{
+ struct ttm_resource *resource = tbo->resource;
+ struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+
+ switch (resource->mem_type) {
+ case TTM_PL_VRAM:
+ lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_GTT);
+ break;
+ case TTM_PL_TT:
+ default:
+ lsdc_bo_set_placement(lbo, LSDC_GEM_DOMAIN_SYSTEM);
+ break;
+ }
+
+ *tplacement = lbo->placement;
+}
+
+static int lsdc_bo_move(struct ttm_buffer_object *tbo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
+{
+ struct drm_device *ddev = tbo->base.dev;
+ struct ttm_resource *old_mem = tbo->resource;
+ struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+ int ret;
+
+ if (unlikely(tbo->pin_count > 0)) {
+ drm_warn(ddev, "Can't move a pinned BO\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_wait_ctx(tbo, ctx);
+ if (ret)
+ return ret;
+
+ if (!old_mem) {
+ drm_dbg(ddev, "bo[%p] move: NULL to %s, size: %zu\n",
+ lbo, lsdc_mem_type_to_str(new_mem->mem_type),
+ lsdc_bo_size(lbo));
+ ttm_bo_move_null(tbo, new_mem);
+ return 0;
+ }
+
+ if (old_mem->mem_type == TTM_PL_SYSTEM && !tbo->ttm) {
+ ttm_bo_move_null(tbo, new_mem);
+ drm_dbg(ddev, "bo[%p] move: SYSTEM to NULL, size: %zu\n",
+ lbo, lsdc_bo_size(lbo));
+ return 0;
+ }
+
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) {
+ drm_dbg(ddev, "bo[%p] move: SYSTEM to GTT, size: %zu\n",
+ lbo, lsdc_bo_size(lbo));
+ ttm_bo_move_null(tbo, new_mem);
+ return 0;
+ }
+
+ if (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ drm_dbg(ddev, "bo[%p] move: GTT to SYSTEM, size: %zu\n",
+ lbo, lsdc_bo_size(lbo));
+ ttm_resource_free(tbo, &tbo->resource);
+ ttm_bo_assign_mem(tbo, new_mem);
+ return 0;
+ }
+
+ drm_dbg(ddev, "bo[%p] move: %s to %s, size: %zu\n",
+ lbo,
+ lsdc_mem_type_to_str(old_mem->mem_type),
+ lsdc_mem_type_to_str(new_mem->mem_type),
+ lsdc_bo_size(lbo));
+
+ return ttm_bo_move_memcpy(tbo, ctx, new_mem);
+}
+
+static int lsdc_bo_reserve_io_mem(struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ struct lsdc_device *ldev = tdev_to_ldev(bdev);
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ break;
+ case TTM_PL_TT:
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = (mem->start << PAGE_SHIFT) + ldev->vram_base;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct ttm_device_funcs lsdc_bo_driver = {
+ .ttm_tt_create = lsdc_ttm_tt_create,
+ .ttm_tt_populate = lsdc_ttm_tt_populate,
+ .ttm_tt_unpopulate = lsdc_ttm_tt_unpopulate,
+ .ttm_tt_destroy = lsdc_ttm_tt_destroy,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = lsdc_bo_evict_flags,
+ .move = lsdc_bo_move,
+ .io_mem_reserve = lsdc_bo_reserve_io_mem,
+};
+
+u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo)
+{
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+ struct drm_device *ddev = tbo->base.dev;
+ struct ttm_resource *resource = tbo->resource;
+
+ if (unlikely(!tbo->pin_count)) {
+ drm_err(ddev, "unpinned bo, gpu virtual address is invalid\n");
+ return 0;
+ }
+
+ if (unlikely(resource->mem_type == TTM_PL_SYSTEM))
+ return 0;
+
+ return resource->start << PAGE_SHIFT;
+}
+
+size_t lsdc_bo_size(struct lsdc_bo *lbo)
+{
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+
+ return tbo->base.size;
+}
+
+int lsdc_bo_reserve(struct lsdc_bo *lbo)
+{
+ return ttm_bo_reserve(&lbo->tbo, true, false, NULL);
+}
+
+void lsdc_bo_unreserve(struct lsdc_bo *lbo)
+{
+ return ttm_bo_unreserve(&lbo->tbo);
+}
+
+int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+ struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
+ int ret;
+
+ if (tbo->pin_count)
+ goto bo_pinned;
+
+ if (lbo->sharing_count && domain == LSDC_GEM_DOMAIN_VRAM)
+ return -EINVAL;
+
+ if (domain)
+ lsdc_bo_set_placement(lbo, domain);
+
+ ret = ttm_bo_validate(tbo, &lbo->placement, &ctx);
+ if (unlikely(ret)) {
+ drm_err(&ldev->base, "%p validate failed: %d\n", lbo, ret);
+ return ret;
+ }
+
+ if (domain == LSDC_GEM_DOMAIN_VRAM)
+ ldev->vram_pinned_size += lsdc_bo_size(lbo);
+ else if (domain == LSDC_GEM_DOMAIN_GTT)
+ ldev->gtt_pinned_size += lsdc_bo_size(lbo);
+
+bo_pinned:
+ ttm_bo_pin(tbo);
+
+ if (gpu_addr)
+ *gpu_addr = lsdc_bo_gpu_offset(lbo);
+
+ return 0;
+}
+
+void lsdc_bo_unpin(struct lsdc_bo *lbo)
+{
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+ struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
+
+ if (unlikely(!tbo->pin_count)) {
+ drm_dbg(&ldev->base, "%p unpin is not necessary\n", lbo);
+ return;
+ }
+
+ ttm_bo_unpin(tbo);
+
+ if (!tbo->pin_count) {
+ if (tbo->resource->mem_type == TTM_PL_VRAM)
+ ldev->vram_pinned_size -= lsdc_bo_size(lbo);
+ else if (tbo->resource->mem_type == TTM_PL_TT)
+ ldev->gtt_pinned_size -= lsdc_bo_size(lbo);
+ }
+}
+
+void lsdc_bo_ref(struct lsdc_bo *lbo)
+{
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+
+ ttm_bo_get(tbo);
+}
+
+void lsdc_bo_unref(struct lsdc_bo *lbo)
+{
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+
+ ttm_bo_put(tbo);
+}
+
+int lsdc_bo_kmap(struct lsdc_bo *lbo)
+{
+ struct ttm_buffer_object *tbo = &lbo->tbo;
+ struct drm_gem_object *gem = &tbo->base;
+ struct drm_device *ddev = gem->dev;
+ long ret;
+ int err;
+
+ ret = dma_resv_wait_timeout(gem->resv, DMA_RESV_USAGE_KERNEL, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ drm_warn(ddev, "wait fence timeout\n");
+ return ret;
+ }
+
+ if (lbo->kptr)
+ return 0;
+
+ err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
+ if (err) {
+ drm_err(ddev, "kmap %p failed: %d\n", lbo, err);
+ return err;
+ }
+
+ lbo->kptr = ttm_kmap_obj_virtual(&lbo->kmap, &lbo->is_iomem);
+
+ return 0;
+}
+
+void lsdc_bo_kunmap(struct lsdc_bo *lbo)
+{
+ if (!lbo->kptr)
+ return;
+
+ lbo->kptr = NULL;
+ ttm_bo_kunmap(&lbo->kmap);
+}
+
+void lsdc_bo_clear(struct lsdc_bo *lbo)
+{
+ lsdc_bo_kmap(lbo);
+
+ if (lbo->is_iomem)
+ memset_io((void __iomem *)lbo->kptr, 0, lbo->size);
+ else
+ memset(lbo->kptr, 0, lbo->size);
+
+ lsdc_bo_kunmap(lbo);
+}
+
+int lsdc_bo_evict_vram(struct drm_device *ddev)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct ttm_device *bdev = &ldev->bdev;
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, TTM_PL_VRAM);
+ if (unlikely(!man))
+ return 0;
+
+ return ttm_resource_manager_evict_all(bdev, man);
+}
+
+static void lsdc_bo_destroy(struct ttm_buffer_object *tbo)
+{
+ struct lsdc_device *ldev = tdev_to_ldev(tbo->bdev);
+ struct lsdc_bo *lbo = to_lsdc_bo(tbo);
+
+ mutex_lock(&ldev->gem.mutex);
+ list_del_init(&lbo->list);
+ mutex_unlock(&ldev->gem.mutex);
+
+ drm_gem_object_release(&tbo->base);
+
+ kfree(lbo);
+}
+
+struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
+ u32 domain,
+ size_t size,
+ bool kernel,
+ struct sg_table *sg,
+ struct dma_resv *resv)
+{
+ struct lsdc_device *ldev = to_lsdc(ddev);
+ struct ttm_device *bdev = &ldev->bdev;
+ struct ttm_buffer_object *tbo;
+ struct lsdc_bo *lbo;
+ enum ttm_bo_type bo_type;
+ int ret;
+
+ lbo = kzalloc(sizeof(*lbo), GFP_KERNEL);
+ if (!lbo)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&lbo->list);
+
+ lbo->initial_domain = domain & (LSDC_GEM_DOMAIN_VRAM |
+ LSDC_GEM_DOMAIN_GTT |
+ LSDC_GEM_DOMAIN_SYSTEM);
+
+ tbo = &lbo->tbo;
+
+ size = ALIGN(size, PAGE_SIZE);
+
+ ret = drm_gem_object_init(ddev, &tbo->base, size);
+ if (ret) {
+ kfree(lbo);
+ return ERR_PTR(ret);
+ }
+
+ tbo->bdev = bdev;
+
+ if (kernel)
+ bo_type = ttm_bo_type_kernel;
+ else if (sg)
+ bo_type = ttm_bo_type_sg;
+ else
+ bo_type = ttm_bo_type_device;
+
+ lsdc_bo_set_placement(lbo, domain);
+ lbo->size = size;
+
+ ret = ttm_bo_init_validate(bdev, tbo, bo_type, &lbo->placement, 0,
+ false, sg, resv, lsdc_bo_destroy);
+ if (ret) {
+ kfree(lbo);
+ return ERR_PTR(ret);
+ }
+
+ return lbo;
+}
+
+struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
+ u32 domain,
+ size_t size)
+{
+ struct lsdc_bo *lbo;
+ int ret;
+
+ lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
+ if (IS_ERR(lbo))
+ return ERR_CAST(lbo);
+
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret)) {
+ lsdc_bo_unref(lbo);
+ return ERR_PTR(ret);
+ }
+
+ ret = lsdc_bo_pin(lbo, domain, NULL);
+ lsdc_bo_unreserve(lbo);
+ if (unlikely(ret)) {
+ lsdc_bo_unref(lbo);
+ return ERR_PTR(ret);
+ }
+
+ return lbo;
+}
+
+void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo)
+{
+ int ret;
+
+ ret = lsdc_bo_reserve(lbo);
+ if (unlikely(ret))
+ return;
+
+ lsdc_bo_unpin(lbo);
+ lsdc_bo_unreserve(lbo);
+
+ lsdc_bo_unref(lbo);
+}
+
+static void lsdc_ttm_fini(struct drm_device *ddev, void *data)
+{
+ struct lsdc_device *ldev = (struct lsdc_device *)data;
+
+ ttm_range_man_fini(&ldev->bdev, TTM_PL_VRAM);
+ ttm_range_man_fini(&ldev->bdev, TTM_PL_TT);
+
+ ttm_device_fini(&ldev->bdev);
+
+ drm_dbg(ddev, "ttm finished\n");
+}
+
+int lsdc_ttm_init(struct lsdc_device *ldev)
+{
+ struct drm_device *ddev = &ldev->base;
+ unsigned long num_vram_pages;
+ unsigned long num_gtt_pages;
+ int ret;
+
+ ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
+ ddev->anon_inode->i_mapping,
+ ddev->vma_offset_manager, false, true);
+ if (ret)
+ return ret;
+
+ num_vram_pages = ldev->vram_size >> PAGE_SHIFT;
+
+ ret = ttm_range_man_init(&ldev->bdev, TTM_PL_VRAM, false, num_vram_pages);
+ if (unlikely(ret))
+ return ret;
+
+ drm_info(ddev, "VRAM: %lu pages ready\n", num_vram_pages);
+
+ /* 512MB of GTT is more than enough for now */
+ ldev->gtt_size = 512 << 20;
+
+ num_gtt_pages = ldev->gtt_size >> PAGE_SHIFT;
+
+ ret = ttm_range_man_init(&ldev->bdev, TTM_PL_TT, true, num_gtt_pages);
+ if (unlikely(ret))
+ return ret;
+
+ drm_info(ddev, "GTT: %lu pages ready\n", num_gtt_pages);
+
+ return drmm_add_action_or_reset(ddev, lsdc_ttm_fini, ldev);
+}
+
+void lsdc_ttm_debugfs_init(struct lsdc_device *ldev)
+{
+ struct ttm_device *bdev = &ldev->bdev;
+ struct drm_device *ddev = &ldev->base;
+ struct drm_minor *minor = ddev->primary;
+ struct dentry *root = minor->debugfs_root;
+ struct ttm_resource_manager *vram_man;
+ struct ttm_resource_manager *gtt_man;
+
+ vram_man = ttm_manager_type(bdev, TTM_PL_VRAM);
+ gtt_man = ttm_manager_type(bdev, TTM_PL_TT);
+
+ ttm_resource_manager_create_debugfs(vram_man, root, "vram_mm");
+ ttm_resource_manager_create_debugfs(gtt_man, root, "gtt_mm");
+}
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.h b/drivers/gpu/drm/loongson/lsdc_ttm.h
new file mode 100644
index 000000000000..843e1475064e
--- /dev/null
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __LSDC_TTM_H__
+#define __LSDC_TTM_H__
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+#include <linux/list.h>
+
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
+
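+/* Memory domains a buffer object can be created in or migrated to */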
+#define LSDC_GEM_DOMAIN_SYSTEM 0x1
+#define LSDC_GEM_DOMAIN_GTT 0x2
+#define LSDC_GEM_DOMAIN_VRAM 0x4
+
+struct lsdc_bo {
+ struct ttm_buffer_object tbo;
+
+ /* Protected by gem.mutex */
+ struct list_head list;
+
+ struct iosys_map map;
+
+ unsigned int vmap_count;
+ /* cross device driver sharing reference count */
+ unsigned int sharing_count;
+
+ struct ttm_bo_kmap_obj kmap;
+ void *kptr;
+ bool is_iomem;
+
+ size_t size;
+
+ u32 initial_domain;
+
+ struct ttm_placement placement;
+ struct ttm_place placements[4];
+};
+
+static inline struct ttm_buffer_object *to_ttm_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct ttm_buffer_object, base);
+}
+
+static inline struct lsdc_bo *to_lsdc_bo(struct ttm_buffer_object *tbo)
+{
+ return container_of(tbo, struct lsdc_bo, tbo);
+}
+
+static inline struct lsdc_bo *gem_to_lsdc_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct lsdc_bo, tbo.base);
+}
+
+const char *lsdc_mem_type_to_str(uint32_t mem_type);
+const char *lsdc_domain_to_str(u32 domain);
+
+struct lsdc_bo *lsdc_bo_create(struct drm_device *ddev,
+ u32 domain,
+ size_t size,
+ bool kernel,
+ struct sg_table *sg,
+ struct dma_resv *resv);
+
+struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
+ u32 domain,
+ size_t size);
+
+void lsdc_bo_free_kernel_pinned(struct lsdc_bo *lbo);
+
+int lsdc_bo_reserve(struct lsdc_bo *lbo);
+void lsdc_bo_unreserve(struct lsdc_bo *lbo);
+
+int lsdc_bo_pin(struct lsdc_bo *lbo, u32 domain, u64 *gpu_addr);
+void lsdc_bo_unpin(struct lsdc_bo *lbo);
+
+void lsdc_bo_ref(struct lsdc_bo *lbo);
+void lsdc_bo_unref(struct lsdc_bo *lbo);
+
+u64 lsdc_bo_gpu_offset(struct lsdc_bo *lbo);
+size_t lsdc_bo_size(struct lsdc_bo *lbo);
+
+int lsdc_bo_kmap(struct lsdc_bo *lbo);
+void lsdc_bo_kunmap(struct lsdc_bo *lbo);
+void lsdc_bo_clear(struct lsdc_bo *lbo);
+
+int lsdc_bo_evict_vram(struct drm_device *ddev);
+
+int lsdc_ttm_init(struct lsdc_device *ldev);
+void lsdc_ttm_debugfs_init(struct lsdc_device *ldev);
+
+#endif
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index a8cd86c06c14..a2572fb311f0 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -448,7 +448,7 @@ regulator_epod_off:
}
-static int mcde_remove(struct platform_device *pdev)
+static void mcde_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct mcde *mcde = to_mcde(drm);
@@ -457,8 +457,6 @@ static int mcde_remove(struct platform_device *pdev)
clk_disable_unprepare(mcde->mcde_clk);
regulator_disable(mcde->vana);
regulator_disable(mcde->epod);
-
- return 0;
}
static const struct of_device_id mcde_of_match[] = {
@@ -471,10 +469,10 @@ static const struct of_device_id mcde_of_match[] = {
static struct platform_driver mcde_driver = {
.driver = {
.name = "mcde",
- .of_match_table = of_match_ptr(mcde_of_match),
+ .of_match_table = mcde_of_match,
},
.probe = mcde_probe,
- .remove = mcde_remove,
+ .remove_new = mcde_remove,
};
static struct platform_driver *const component_drivers[] = {
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 9f9ac8699310..e2fad1a048b5 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1208,14 +1208,12 @@ static int mcde_dsi_probe(struct platform_device *pdev)
return component_add(dev, &mcde_dsi_component_ops);
}
-static int mcde_dsi_remove(struct platform_device *pdev)
+static void mcde_dsi_remove(struct platform_device *pdev)
{
struct mcde_dsi *d = platform_get_drvdata(pdev);
component_del(&pdev->dev, &mcde_dsi_component_ops);
mipi_dsi_host_unregister(&d->dsi_host);
-
- return 0;
}
static const struct of_device_id mcde_dsi_of_match[] = {
@@ -1228,8 +1226,8 @@ static const struct of_device_id mcde_dsi_of_match[] = {
struct platform_driver mcde_dsi_driver = {
.driver = {
.name = "mcde-dsi",
- .of_match_table = of_match_ptr(mcde_dsi_of_match),
+ .of_match_table = mcde_dsi_of_match,
},
.probe = mcde_dsi_probe,
- .remove = mcde_dsi_remove,
+ .remove_new = mcde_dsi_remove,
};
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index b451dee64d34..76cab28e010c 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -26,6 +26,7 @@ config DRM_MEDIATEK_DP
select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
+ select DRM_DP_AUX_BUS
help
DRM/KMS Display Port driver for MediaTek SoCs.
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index b640bc0559e7..f47f417d8ba6 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -235,13 +235,12 @@ static int mtk_cec_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_cec_remove(struct platform_device *pdev)
+static void mtk_cec_remove(struct platform_device *pdev)
{
struct mtk_cec *cec = platform_get_drvdata(pdev);
mtk_cec_htplg_irq_disable(cec);
clk_disable_unprepare(cec->clk);
- return 0;
}
static const struct of_device_id mtk_cec_of_ids[] = {
@@ -252,7 +251,7 @@ MODULE_DEVICE_TABLE(of, mtk_cec_of_ids);
struct platform_driver mtk_cec_driver = {
.probe = mtk_cec_probe,
- .remove = mtk_cec_remove,
+ .remove_new = mtk_cec_remove,
.driver = {
.name = "mediatek-cec",
.of_match_table = mtk_cec_of_ids,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 434e8a9ce8ab..4da9ac93b29e 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -6,8 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -26,11 +25,6 @@ struct mtk_disp_aal_data {
bool has_gamma;
};
-/**
- * struct mtk_disp_aal - DISP_AAL driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
- */
struct mtk_disp_aal {
struct clk *clk;
void __iomem *regs;
@@ -140,11 +134,9 @@ static int mtk_disp_aal_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_aal_remove(struct platform_device *pdev)
+static void mtk_disp_aal_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_aal_component_ops);
-
- return 0;
}
static const struct mtk_disp_aal_data mt8173_aal_driver_data = {
@@ -161,7 +153,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match);
struct platform_driver mtk_disp_aal_driver = {
.probe = mtk_disp_aal_probe,
- .remove = mtk_disp_aal_remove,
+ .remove_new = mtk_disp_aal_remove,
.driver = {
.name = "mediatek-disp-aal",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 1773379b2439..4234ff7485e8 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -6,8 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -34,11 +33,6 @@ struct mtk_disp_ccorr_data {
u32 matrix_bits;
};
-/**
- * struct mtk_disp_ccorr - DISP_CCORR driver structure
- * @ddp_comp - structure containing type enum and hardware resources
- * @crtc - associated crtc to report irq events to
- */
struct mtk_disp_ccorr {
struct clk *clk;
void __iomem *regs;
@@ -195,11 +189,9 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_ccorr_remove(struct platform_device *pdev)
+static void mtk_disp_ccorr_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ccorr_component_ops);
-
- return 0;
}
static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = {
@@ -221,7 +213,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
struct platform_driver mtk_disp_ccorr_driver = {
.probe = mtk_disp_ccorr_probe,
- .remove = mtk_disp_ccorr_remove,
+ .remove_new = mtk_disp_ccorr_remove,
.driver = {
.name = "mediatek-disp-ccorr",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index cac9206079e7..78ea99f1444f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -6,8 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -132,11 +131,9 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_color_remove(struct platform_device *pdev)
+static void mtk_disp_color_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_color_component_ops);
-
- return 0;
}
static const struct mtk_disp_color_data mt2701_color_driver_data = {
@@ -164,7 +161,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
struct platform_driver mtk_disp_color_driver = {
.probe = mtk_disp_color_probe,
- .remove = mtk_disp_color_remove,
+ .remove_new = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index c844942603f7..673f9a5738f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -6,8 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -183,11 +182,9 @@ static int mtk_disp_gamma_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_gamma_remove(struct platform_device *pdev)
+static void mtk_disp_gamma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_gamma_component_ops);
-
- return 0;
}
static const struct mtk_disp_gamma_data mt8173_gamma_driver_data = {
@@ -209,7 +206,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
struct platform_driver mtk_disp_gamma_driver = {
.probe = mtk_disp_gamma_probe,
- .remove = mtk_disp_gamma_remove,
+ .remove_new = mtk_disp_gamma_remove,
.driver = {
.name = "mediatek-disp-gamma",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 6428b6203ffe..e525a6b9e5b0 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -5,8 +5,7 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -295,11 +294,9 @@ static int mtk_disp_merge_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_merge_remove(struct platform_device *pdev)
+static void mtk_disp_merge_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_merge_component_ops);
-
- return 0;
}
static const struct of_device_id mtk_disp_merge_driver_dt_match[] = {
@@ -311,7 +308,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);
struct platform_driver mtk_disp_merge_driver = {
.probe = mtk_disp_merge_probe,
- .remove = mtk_disp_merge_remove,
+ .remove_new = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 8f52cc1f3fba..2bffe4245466 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -10,8 +10,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -562,12 +561,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_ovl_remove(struct platform_device *pdev)
+static void mtk_disp_ovl_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
@@ -659,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
struct platform_driver mtk_disp_ovl_driver = {
.probe = mtk_disp_ovl_probe,
- .remove = mtk_disp_ovl_remove,
+ .remove_new = mtk_disp_ovl_remove,
.driver = {
.name = "mediatek-disp-ovl",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index c0a38f5217ee..6bf6367853fb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -7,8 +7,9 @@
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -426,7 +427,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
continue;
}
- type = (enum mtk_ovl_adaptor_comp_type)of_id->data;
+ type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data;
id = ovl_adaptor_comp_get_id(dev, node, type);
if (id < 0) {
dev_warn(dev, "Skipping unknown component %pOF\n",
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index d4df17ad600a..faa907f2f443 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -8,8 +8,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -380,13 +379,11 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_rdma_remove(struct platform_device *pdev)
+static void mtk_disp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
@@ -428,7 +425,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
struct platform_driver mtk_disp_rdma_driver = {
.probe = mtk_disp_rdma_probe,
- .remove = mtk_disp_rdma_remove,
+ .remove_new = mtk_disp_rdma_remove,
.driver = {
.name = "mediatek-disp-rdma",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 64eee77452c0..2cb47f663756 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2022 BayLibre
*/
+#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -100,6 +101,7 @@ struct mtk_dp_efuse_fmt {
struct mtk_dp {
bool enabled;
bool need_debounce;
+ int irq;
u8 max_lanes;
u8 max_linkrate;
u8 rx_cap[DP_RECEIVER_CAP_SIZE];
@@ -847,7 +849,7 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
- drm_err(mtk_dp->drm_dev,
+ dev_err(mtk_dp->dev,
"AUX Rx Aux hang, need SW reset\n");
return -EIO;
}
@@ -1009,6 +1011,11 @@ static void mtk_dp_initialize_aux_settings(struct mtk_dp *mtk_dp)
mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_37C8,
MTK_ATOP_EN_AUX_TX_P0,
MTK_ATOP_EN_AUX_TX_P0);
+
+ /* Set complete reply mode for AUX */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
}
static void mtk_dp_initialize_digital_settings(struct mtk_dp *mtk_dp)
@@ -1251,6 +1258,29 @@ static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute)
val[2], AU_TS_CFG_DP_ENC0_P0_MASK);
}
+static void mtk_dp_aux_panel_poweron(struct mtk_dp *mtk_dp, bool pwron)
+{
+ if (pwron) {
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ /* power on panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ } else {
+ /* power off panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ }
+}
+
static void mtk_dp_power_enable(struct mtk_dp *mtk_dp)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
@@ -1284,9 +1314,11 @@ static void mtk_dp_power_disable(struct mtk_dp *mtk_dp)
static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp)
{
+ bool plugged_in = (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP);
+
mtk_dp->train_info.link_rate = DP_LINK_BW_5_4;
mtk_dp->train_info.lane_count = mtk_dp->max_lanes;
- mtk_dp->train_info.cable_plugged_in = false;
+ mtk_dp->train_info.cable_plugged_in = plugged_in;
mtk_dp->info.format = DP_PIXELFORMAT_RGB;
memset(&mtk_dp->info.vm, 0, sizeof(struct videomode));
@@ -1588,7 +1620,19 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
u8 val;
ssize_t ret;
- drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+ /*
+ * If we're eDP and the capabilities were already parsed, we can skip
+ * reading them again: eDP panels aren't hotpluggable, so the caps and
+ * training information will never change for the lifetime of a boot.
+ */
+ if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP &&
+ mtk_dp->rx_cap[DP_MAX_LINK_RATE] &&
+ mtk_dp->train_info.sink_ssc)
+ return 0;
+
+ ret = drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+ if (ret < 0)
+ return ret;
if (drm_dp_tps4_supported(mtk_dp->rx_cap))
mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
@@ -1615,10 +1659,13 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
return ret == 0 ? -EIO : ret;
}
- if (val)
- drm_dp_dpcd_writeb(&mtk_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
- val);
+ if (val) {
+ ret = drm_dp_dpcd_writeb(&mtk_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ val);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
@@ -1798,10 +1845,6 @@ static void mtk_dp_init_port(struct mtk_dp *mtk_dp)
mtk_dp_initialize_settings(mtk_dp);
mtk_dp_initialize_aux_settings(mtk_dp);
mtk_dp_initialize_digital_settings(mtk_dp);
-
- mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
- RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
- RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
mtk_dp_initialize_hpd_detect_settings(mtk_dp);
mtk_dp_digital_sw_reset(mtk_dp);
@@ -1877,6 +1920,31 @@ static irqreturn_t mtk_dp_hpd_event(int hpd, void *dev)
return IRQ_WAKE_THREAD;
}
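+/*
+ * drm_dp_aux .wait_hpd_asserted() callback: poll the debounced HPD status
+ * and, once the sink responds, cache its capabilities.
+ */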
+static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wait_us)
+{
+ struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(mtk_dp->regs, MTK_DP_TRANS_P0_3414,
+ val, !!(val & HPD_DB_DP_TRANS_P0_MASK),
+ wait_us / 100, wait_us);
+ if (ret) {
+ mtk_dp->train_info.cable_plugged_in = false;
+ return ret;
+ }
+
+ mtk_dp->train_info.cable_plugged_in = true;
+
+ ret = mtk_dp_parse_capabilities(mtk_dp);
+ if (ret) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
struct platform_device *pdev)
{
@@ -1918,6 +1986,9 @@ static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
{
+ if (!mtk_dp->data->audio_supported || !mtk_dp->audio_enable)
+ return;
+
mutex_lock(&mtk_dp->update_plugged_status_lock);
if (mtk_dp->plugged_cb && mtk_dp->codec_dev)
mtk_dp->plugged_cb(mtk_dp->codec_dev,
@@ -1936,16 +2007,9 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
- if (!enabled) {
- /* power on aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL_LANE,
- DP_PWR_STATE_MASK);
+ if (!enabled)
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
- /* power on panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(2000, 5000);
- }
/*
* Some dongles still source HPD when they do not connect to any
* sink device. To avoid this, we need to read the sink count
@@ -1957,16 +2021,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
if (DP_GET_SINK_COUNT(sink_count))
ret = connector_status_connected;
- if (!enabled) {
- /* power off panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
- usleep_range(2000, 3000);
-
- /* power off aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL,
- DP_PWR_STATE_MASK);
- }
+ if (!enabled)
+ mtk_dp_aux_panel_poweron(mtk_dp, false);
return ret;
}
@@ -1982,15 +2038,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
if (!enabled) {
drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
-
- /* power on aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL_LANE,
- DP_PWR_STATE_MASK);
-
- /* power on panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(2000, 5000);
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
}
new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
@@ -2010,15 +2058,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
}
if (!enabled) {
- /* power off panel */
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
- usleep_range(2000, 3000);
-
- /* power off aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL,
- DP_PWR_STATE_MASK);
-
+ mtk_dp_aux_panel_poweron(mtk_dp, false);
drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
@@ -2028,15 +2068,14 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
struct drm_dp_aux_msg *msg)
{
- struct mtk_dp *mtk_dp;
+ struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
bool is_read;
u8 request;
size_t accessed_bytes = 0;
int ret;
- mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
-
- if (!mtk_dp->train_info.cable_plugged_in) {
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
+ !mtk_dp->train_info.cable_plugged_in) {
ret = -EAGAIN;
goto err;
}
@@ -2057,7 +2096,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
is_read = true;
break;
default:
- drm_err(mtk_aux->drm_dev, "invalid aux cmd = %d\n",
+ dev_err(mtk_dp->dev, "invalid aux cmd = %d\n",
msg->request);
ret = -EINVAL;
goto err;
@@ -2073,7 +2112,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
to_access, &msg->reply);
if (ret) {
- drm_info(mtk_dp->drm_dev,
+ dev_info(mtk_dp->dev,
"Failed to do AUX transfer: %d\n", ret);
goto err;
}
@@ -2143,7 +2182,11 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
mtk_dp->drm_dev = bridge->dev;
- mtk_dp_hwirq_enable(mtk_dp, true);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) {
+ irq_clear_status_flags(mtk_dp->irq, IRQ_NOAUTOEN);
+ enable_irq(mtk_dp->irq);
+ mtk_dp_hwirq_enable(mtk_dp, true);
+ }
return 0;
@@ -2158,7 +2201,10 @@ static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
- mtk_dp_hwirq_enable(mtk_dp, false);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP) {
+ mtk_dp_hwirq_enable(mtk_dp, false);
+ disable_irq(mtk_dp->irq);
+ }
mtk_dp->drm_dev = NULL;
mtk_dp_poweroff(mtk_dp);
drm_dp_aux_unregister(&mtk_dp->aux);
@@ -2178,15 +2224,7 @@ static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
return;
}
- /* power on aux */
- mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
- DP_PWR_STATE_BANDGAP_TPLL_LANE,
- DP_PWR_STATE_MASK);
-
- if (mtk_dp->train_info.cable_plugged_in) {
- drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- usleep_range(2000, 5000);
- }
+ mtk_dp_aux_panel_poweron(mtk_dp, true);
/* Training */
ret = mtk_dp_training(mtk_dp);
@@ -2481,11 +2519,62 @@ static int mtk_dp_register_audio_driver(struct device *dev)
return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev);
}
+static int mtk_dp_register_phy(struct mtk_dp *mtk_dp)
+{
+ struct device *dev = mtk_dp->dev;
+
+ mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
+ PLATFORM_DEVID_AUTO,
+ &mtk_dp->regs,
+ sizeof(struct regmap *));
+ if (IS_ERR(mtk_dp->phy_dev))
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
+ "Failed to create device mediatek-dp-phy\n");
+
+ mtk_dp_get_calibration_data(mtk_dp);
+
+ mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
+ if (IS_ERR(mtk_dp->phy)) {
+ platform_device_unregister(mtk_dp->phy_dev);
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy), "Failed to get phy\n");
+ }
+
+ return 0;
+}
+
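+/*
+ * aux-bus .done_probing() callback: look up the panel bridge and power the
+ * DP/AUX block back off, as EDID reading over the aux-bus is finished here.
+ */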
+static int mtk_dp_edp_link_panel(struct drm_dp_aux *mtk_aux)
+{
+ struct mtk_dp *mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+ struct device *dev = mtk_aux->dev;
+ int ret;
+
+ mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+
+ /* Power off the DP and AUX: either detection is done, or no panel present */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ mtk_dp_power_disable(mtk_dp);
+
+ if (IS_ERR(mtk_dp->next_bridge)) {
+ ret = PTR_ERR(mtk_dp->next_bridge);
+ mtk_dp->next_bridge = NULL;
+ return ret;
+ }
+
+ /* For eDP, we add the bridge only if the panel was found */
+ ret = devm_drm_bridge_add(dev, &mtk_dp->bridge);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int mtk_dp_probe(struct platform_device *pdev)
{
struct mtk_dp *mtk_dp;
struct device *dev = &pdev->dev;
- int ret, irq_num;
+ int ret;
mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
if (!mtk_dp)
@@ -2494,42 +2583,49 @@ static int mtk_dp_probe(struct platform_device *pdev)
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
- irq_num = platform_get_irq(pdev, 0);
- if (irq_num < 0)
- return dev_err_probe(dev, irq_num,
- "failed to request dp irq resource\n");
-
- mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
- if (IS_ERR(mtk_dp->next_bridge) &&
- PTR_ERR(mtk_dp->next_bridge) == -ENODEV)
- mtk_dp->next_bridge = NULL;
- else if (IS_ERR(mtk_dp->next_bridge))
- return dev_err_probe(dev, PTR_ERR(mtk_dp->next_bridge),
- "Failed to get bridge\n");
-
ret = mtk_dp_dt_parse(mtk_dp, pdev);
if (ret)
return dev_err_probe(dev, ret, "Failed to parse dt\n");
- drm_dp_aux_init(&mtk_dp->aux);
- mtk_dp->aux.name = "aux_mtk_dp";
- mtk_dp->aux.transfer = mtk_dp_aux_transfer;
-
- spin_lock_init(&mtk_dp->irq_thread_lock);
+ /*
+ * Request the interrupt and install the service routine only for
+ * full DisplayPort.
+ * For eDP it is more convenient to poll HPD instead, as no (un)plug
+ * events are expected at runtime, which lets us avoid some locking.
+ */
+ if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) {
+ mtk_dp->irq = platform_get_irq(pdev, 0);
+ if (mtk_dp->irq < 0)
+ return dev_err_probe(dev, mtk_dp->irq,
+ "failed to request dp irq resource\n");
+
+ spin_lock_init(&mtk_dp->irq_thread_lock);
+
+ irq_set_status_flags(mtk_dp->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, mtk_dp->irq, mtk_dp_hpd_event,
+ mtk_dp_hpd_event_thread,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
+ mtk_dp);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to request mediatek dptx irq\n");
- ret = devm_request_threaded_irq(dev, irq_num, mtk_dp_hpd_event,
- mtk_dp_hpd_event_thread,
- IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
- mtk_dp);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to request mediatek dptx irq\n");
+ mtk_dp->need_debounce = true;
+ timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+ }
- mutex_init(&mtk_dp->update_plugged_status_lock);
+ mtk_dp->aux.name = "aux_mtk_dp";
+ mtk_dp->aux.dev = dev;
+ mtk_dp->aux.transfer = mtk_dp_aux_transfer;
+ mtk_dp->aux.wait_hpd_asserted = mtk_dp_wait_hpd_asserted;
+ drm_dp_aux_init(&mtk_dp->aux);
platform_set_drvdata(pdev, mtk_dp);
if (mtk_dp->data->audio_supported) {
+ mutex_init(&mtk_dp->update_plugged_status_lock);
+
ret = mtk_dp_register_audio_driver(dev);
if (ret) {
dev_err(dev, "Failed to register audio driver: %d\n",
@@ -2538,35 +2634,59 @@ static int mtk_dp_probe(struct platform_device *pdev)
}
}
- mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
- PLATFORM_DEVID_AUTO,
- &mtk_dp->regs,
- sizeof(struct regmap *));
- if (IS_ERR(mtk_dp->phy_dev))
- return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
- "Failed to create device mediatek-dp-phy\n");
-
- mtk_dp_get_calibration_data(mtk_dp);
-
- mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
-
- if (IS_ERR(mtk_dp->phy)) {
- platform_device_unregister(mtk_dp->phy_dev);
- return dev_err_probe(dev, PTR_ERR(mtk_dp->phy),
- "Failed to get phy\n");
- }
+ ret = mtk_dp_register_phy(mtk_dp);
+ if (ret)
+ return ret;
mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
-
- mtk_dp->bridge.ops =
- DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
- drm_bridge_add(&mtk_dp->bridge);
+ if (mtk_dp->bridge.type == DRM_MODE_CONNECTOR_eDP) {
+ /*
+ * Set the data lanes to idle in case the bootloader didn't
+ * properly shut down the eDP port (this avoids stalls), then
+ * reinitialize, reset and power on the AUX block.
+ */
+ mtk_dp_set_idle_pattern(mtk_dp, true);
+ mtk_dp_initialize_aux_settings(mtk_dp);
+ mtk_dp_power_enable(mtk_dp);
- mtk_dp->need_debounce = true;
- timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+ /* Disable HW interrupts: we don't need any for eDP */
+ mtk_dp_hwirq_enable(mtk_dp, false);
+
+ /*
+ * Power on the AUX to allow reading the EDID from the aux-bus.
+ * Note that the corresponding power-off must happen in the
+ * .done_probing() callback (mtk_dp_edp_link_panel), as only there
+ * can we safely assume that EDID reading has finished.
+ */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ ret = devm_of_dp_aux_populate_bus(&mtk_dp->aux, mtk_dp_edp_link_panel);
+ if (ret) {
+ /* -ENODEV means that the panel is not on the aux-bus */
+ if (ret == -ENODEV) {
+ ret = mtk_dp_edp_link_panel(&mtk_dp->aux);
+ if (ret)
+ return ret;
+ } else {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ mtk_dp_power_disable(mtk_dp);
+ return ret;
+ }
+ }
+ } else {
+ mtk_dp->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ ret = devm_drm_bridge_add(dev, &mtk_dp->bridge);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add bridge\n");
+ }
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -2574,19 +2694,17 @@ static int mtk_dp_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_dp_remove(struct platform_device *pdev)
+static void mtk_dp_remove(struct platform_device *pdev)
{
struct mtk_dp *mtk_dp = platform_get_drvdata(pdev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- del_timer_sync(&mtk_dp->debounce_timer);
- drm_bridge_remove(&mtk_dp->bridge);
+ if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP)
+ del_timer_sync(&mtk_dp->debounce_timer);
platform_device_unregister(mtk_dp->phy_dev);
if (mtk_dp->audio_pdev)
platform_device_unregister(mtk_dp->audio_pdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2595,7 +2713,8 @@ static int mtk_dp_suspend(struct device *dev)
struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
mtk_dp_power_disable(mtk_dp);
- mtk_dp_hwirq_enable(mtk_dp, false);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP)
+ mtk_dp_hwirq_enable(mtk_dp, false);
pm_runtime_put_sync(dev);
return 0;
@@ -2607,7 +2726,8 @@ static int mtk_dp_resume(struct device *dev)
pm_runtime_get_sync(dev);
mtk_dp_init_port(mtk_dp);
- mtk_dp_hwirq_enable(mtk_dp, true);
+ if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP)
+ mtk_dp_hwirq_enable(mtk_dp, true);
mtk_dp_power_enable(mtk_dp);
return 0;
@@ -2645,7 +2765,7 @@ MODULE_DEVICE_TABLE(of, mtk_dp_of_match);
static struct platform_driver mtk_dp_driver = {
.probe = mtk_dp_probe,
- .remove = mtk_dp_remove,
+ .remove_new = mtk_dp_remove,
.driver = {
.name = "mediatek-drm-dp",
.of_match_table = mtk_dp_of_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 948a53f1f4b3..2f931e4e2b60 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -1007,7 +1006,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_dpi *dpi;
- struct resource *mem;
int ret;
dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
@@ -1038,49 +1036,34 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
}
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dpi->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(dpi->regs)) {
- ret = PTR_ERR(dpi->regs);
- dev_err(dev, "Failed to ioremap mem resource: %d\n", ret);
- return ret;
- }
+ dpi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dpi->regs))
+ return dev_err_probe(dev, PTR_ERR(dpi->regs),
+ "Failed to ioremap mem resource\n");
dpi->engine_clk = devm_clk_get(dev, "engine");
- if (IS_ERR(dpi->engine_clk)) {
- ret = PTR_ERR(dpi->engine_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(dpi->engine_clk))
+ return dev_err_probe(dev, PTR_ERR(dpi->engine_clk),
+ "Failed to get engine clock\n");
dpi->pixel_clk = devm_clk_get(dev, "pixel");
- if (IS_ERR(dpi->pixel_clk)) {
- ret = PTR_ERR(dpi->pixel_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get pixel clock: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(dpi->pixel_clk))
+ return dev_err_probe(dev, PTR_ERR(dpi->pixel_clk),
+ "Failed to get pixel clock\n");
dpi->tvd_clk = devm_clk_get(dev, "pll");
- if (IS_ERR(dpi->tvd_clk)) {
- ret = PTR_ERR(dpi->tvd_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(dpi->tvd_clk))
+ return dev_err_probe(dev, PTR_ERR(dpi->tvd_clk),
+ "Failed to get tvdpll clock\n");
dpi->irq = platform_get_irq(pdev, 0);
- if (dpi->irq <= 0)
- return -EINVAL;
+ if (dpi->irq < 0)
+ return dpi->irq;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- NULL, &dpi->next_bridge);
- if (ret)
- return ret;
+ dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dpi->next_bridge))
+ return dev_err_probe(dev, PTR_ERR(dpi->next_bridge),
+ "Failed to get bridge\n");
dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
@@ -1090,57 +1073,37 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
- drm_bridge_add(&dpi->bridge);
+ ret = devm_drm_bridge_add(dev, &dpi->bridge);
+ if (ret)
+ return ret;
ret = component_add(dev, &mtk_dpi_component_ops);
- if (ret) {
- drm_bridge_remove(&dpi->bridge);
- dev_err(dev, "Failed to add component: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add component.\n");
return 0;
}
-static int mtk_dpi_remove(struct platform_device *pdev)
+static void mtk_dpi_remove(struct platform_device *pdev)
{
- struct mtk_dpi *dpi = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &mtk_dpi_component_ops);
- drm_bridge_remove(&dpi->bridge);
-
- return 0;
}
static const struct of_device_id mtk_dpi_of_ids[] = {
- { .compatible = "mediatek,mt2701-dpi",
- .data = &mt2701_conf,
- },
- { .compatible = "mediatek,mt8173-dpi",
- .data = &mt8173_conf,
- },
- { .compatible = "mediatek,mt8183-dpi",
- .data = &mt8183_conf,
- },
- { .compatible = "mediatek,mt8186-dpi",
- .data = &mt8186_conf,
- },
- { .compatible = "mediatek,mt8188-dp-intf",
- .data = &mt8188_dpintf_conf,
- },
- { .compatible = "mediatek,mt8192-dpi",
- .data = &mt8192_conf,
- },
- { .compatible = "mediatek,mt8195-dp-intf",
- .data = &mt8195_dpintf_conf,
- },
- { },
+ { .compatible = "mediatek,mt2701-dpi", .data = &mt2701_conf },
+ { .compatible = "mediatek,mt8173-dpi", .data = &mt8173_conf },
+ { .compatible = "mediatek,mt8183-dpi", .data = &mt8183_conf },
+ { .compatible = "mediatek,mt8186-dpi", .data = &mt8186_conf },
+ { .compatible = "mediatek,mt8188-dp-intf", .data = &mt8188_dpintf_conf },
+ { .compatible = "mediatek,mt8192-dpi", .data = &mt8192_conf },
+ { .compatible = "mediatek,mt8195-dp-intf", .data = &mt8195_dpintf_conf },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
struct platform_driver mtk_dpi_driver = {
.probe = mtk_dpi_probe,
- .remove = mtk_dpi_remove,
+ .remove_new = mtk_dpi_remove,
.driver = {
.name = "mediatek-dpi",
.of_match_table = mtk_dpi_of_ids,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index d40142842f85..b6fa4ad2f94d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
@@ -116,10 +117,9 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
dma_addr_t dma_addr;
pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base) {
- kfree(pkt);
+ if (!pkt->va_base)
return -ENOMEM;
- }
+
pkt->buf_size = size;
pkt->cl = (void *)client;
@@ -129,7 +129,6 @@ static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
kfree(pkt->va_base);
- kfree(pkt);
return -ENOMEM;
}
@@ -145,7 +144,6 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
DMA_TO_DEVICE);
kfree(pkt->va_base);
- kfree(pkt);
}
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index f114da4d36a9..771f4e173353 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -563,14 +563,15 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
/* Not all drm components have a DTS device node, such as ovl_adaptor,
* which is the drm bring up sub driver
*/
- if (node) {
- comp_pdev = of_find_device_by_node(node);
- if (!comp_pdev) {
- DRM_INFO("Waiting for device %s\n", node->full_name);
- return -EPROBE_DEFER;
- }
- comp->dev = &comp_pdev->dev;
+ if (!node)
+ return 0;
+
+ comp_pdev = of_find_device_by_node(node);
+ if (!comp_pdev) {
+ DRM_INFO("Waiting for device %s\n", node->full_name);
+ return -EPROBE_DEFER;
}
+ comp->dev = &comp_pdev->dev;
if (type == MTK_DISP_AAL ||
type == MTK_DISP_BLS ||
@@ -580,7 +581,6 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
type == MTK_DISP_MERGE ||
type == MTK_DISP_OVL ||
type == MTK_DISP_OVL_2L ||
- type == MTK_DISP_OVL_ADAPTOR ||
type == MTK_DISP_PWM ||
type == MTK_DISP_RDMA ||
type == MTK_DPI ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 6dcb4ba2466c..93552d76b6e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -7,8 +7,9 @@
#include <linux/component.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
@@ -354,7 +355,7 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
const struct of_device_id *of_id;
struct device_node *node;
struct device *drm_dev;
- int cnt = 0;
+ unsigned int cnt = 0;
int i, j;
for_each_child_of_node(phandle->parent, node) {
@@ -375,6 +376,9 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
all_drm_priv[cnt] = dev_get_drvdata(drm_dev);
if (all_drm_priv[cnt] && all_drm_priv[cnt]->mtk_drm_bound)
cnt++;
+
+ if (cnt == MAX_CRTC)
+ break;
}
if (drm_priv->data->mmsys_dev_num == cnt) {
@@ -556,11 +560,8 @@ static const struct drm_driver mtk_drm_driver = {
.dumb_create = mtk_drm_gem_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = mtk_drm_gem_prime_import,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
@@ -829,7 +830,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
continue;
}
- comp_type = (enum mtk_ddp_comp_type)of_id->data;
+ comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data;
if (comp_type == MTK_DISP_MUTEX) {
int id;
@@ -909,7 +910,7 @@ err_node:
return ret;
}
-static int mtk_drm_remove(struct platform_device *pdev)
+static void mtk_drm_remove(struct platform_device *pdev)
{
struct mtk_drm_private *private = platform_get_drvdata(pdev);
int i;
@@ -919,8 +920,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
of_node_put(private->mutex_node);
for (i = 0; i < DDP_COMPONENT_DRM_ID_MAX; i++)
of_node_put(private->comp_node[i]);
-
- return 0;
}
static int mtk_drm_sys_prepare(struct device *dev)
@@ -953,7 +952,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
- .remove = mtk_drm_remove,
+ .remove_new = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
.pm = &mtk_drm_pm_ops,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index a25b28d3ee90..9f364df52478 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -247,7 +247,11 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
-
+ if (!mtk_gem->kvaddr) {
+ kfree(sgt);
+ kfree(mtk_gem->pages);
+ return -ENOMEM;
+ }
out:
kfree(sgt);
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 31f9420aff6f..db2f70ae060d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -122,11 +122,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (ret)
return ret;
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
- else /* Special case for asynchronous cursor updates. */
- crtc_state = new_plane_state->crtc->state;
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 7d5250351193..d8bfc2cce54d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1178,14 +1178,12 @@ err_unregister_host:
return ret;
}
-static int mtk_dsi_remove(struct platform_device *pdev)
+static void mtk_dsi_remove(struct platform_device *pdev)
{
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
mipi_dsi_host_unregister(&dsi->host);
-
- return 0;
}
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
@@ -1223,7 +1221,7 @@ MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);
struct platform_driver mtk_dsi_driver = {
.probe = mtk_dsi_probe,
- .remove = mtk_dsi_remove,
+ .remove_new = mtk_dsi_remove,
.driver = {
.name = "mtk-dsi",
.of_match_table = mtk_dsi_of_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index 73dc4da3ba3b..db7ac666ec5e 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -7,7 +7,7 @@
#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 0a8e0a13f516..86133bf16326 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1746,13 +1746,12 @@ err_bridge_remove:
return ret;
}
-static int mtk_drm_hdmi_remove(struct platform_device *pdev)
+static void mtk_drm_hdmi_remove(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
drm_bridge_remove(&hdmi->bridge);
mtk_hdmi_clk_disable_audio(hdmi);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1806,7 +1805,7 @@ MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_of_ids);
static struct platform_driver mtk_hdmi_driver = {
.probe = mtk_drm_hdmi_probe,
- .remove = mtk_drm_hdmi_remove,
+ .remove_new = mtk_drm_hdmi_remove,
.driver = {
.name = "mediatek-drm-hdmi",
.of_match_table = mtk_drm_hdmi_of_ids,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 2fc9214ffa82..d675c954befe 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -295,7 +295,7 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
return ret;
}
- strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
+ strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
ddc->adap.owner = THIS_MODULE;
ddc->adap.class = I2C_CLASS_DDC;
ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
@@ -324,14 +324,12 @@ err_clk_disable:
return ret;
}
-static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
+static void mtk_hdmi_ddc_remove(struct platform_device *pdev)
{
struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
i2c_del_adapter(&ddc->adap);
clk_disable_unprepare(ddc->clk);
-
- return 0;
}
static const struct of_device_id mtk_hdmi_ddc_match[] = {
@@ -342,7 +340,7 @@ MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_match);
struct platform_driver mtk_hdmi_ddc_driver = {
.probe = mtk_hdmi_ddc_probe,
- .remove = mtk_hdmi_ddc_remove,
+ .remove_new = mtk_hdmi_ddc_remove,
.driver = {
.name = "mediatek-hdmi-ddc",
.of_match_table = mtk_hdmi_ddc_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index e06db6e56b5f..c3adaeefd551 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -6,8 +6,7 @@
#include <drm/drm_fourcc.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
@@ -315,11 +314,10 @@ static int mtk_mdp_rdma_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_mdp_rdma_remove(struct platform_device *pdev)
+static void mtk_mdp_rdma_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_mdp_rdma_component_ops);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static const struct of_device_id mtk_mdp_rdma_driver_dt_match[] = {
@@ -330,7 +328,7 @@ MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match);
struct platform_driver mtk_mdp_rdma_driver = {
.probe = mtk_mdp_rdma_probe,
- .remove = mtk_mdp_rdma_remove,
+ .remove_new = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 747b639ea0c4..cb674966e9ac 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -516,11 +516,9 @@ static int meson_drv_probe(struct platform_device *pdev)
return 0;
};
-static int meson_drv_remove(struct platform_device *pdev)
+static void meson_drv_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &meson_drv_master_ops);
-
- return 0;
}
static struct meson_drm_match_data meson_drm_gxbb_data = {
@@ -560,7 +558,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
- .remove = meson_drv_remove,
+ .remove_new = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index b23009a3380f..3f9345c14f31 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -9,7 +9,6 @@
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
struct drm_crtc;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3d046878ce6c..5a9538bc0e26 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -9,8 +9,9 @@
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -379,8 +380,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
mode->clock > 340000 ? 40 : 10);
if (drm_mode_is_420_only(display, mode) ||
- (!is_hdmi2_sink &&
- drm_mode_is_420_also(display, mode)))
+ (!is_hdmi2_sink && drm_mode_is_420_also(display, mode)) ||
+ dw_hdmi_bus_fmt_is_420(hdmi))
mode_is_420 = true;
/* Enable clocks */
@@ -852,11 +853,9 @@ static int meson_dw_hdmi_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &meson_dw_hdmi_ops);
}
-static int meson_dw_hdmi_remove(struct platform_device *pdev)
+static void meson_dw_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &meson_dw_hdmi_ops);
-
- return 0;
}
static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
@@ -879,7 +878,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
static struct platform_driver meson_dw_hdmi_platform_driver = {
.probe = meson_dw_hdmi_probe,
- .remove = meson_dw_hdmi_remove,
+ .remove_new = meson_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index 57447abf1a29..e5fe4e994f43 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -7,9 +7,10 @@
#include <linux/clk.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/bitfield.h>
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 812e172dec63..3f93c70488ca 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 53231bfdf7e2..9913971fa5d2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -9,8 +9,10 @@
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 976f0ab2006b..abddf37f0ea1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -20,7 +20,7 @@
#include "mgag200_drv.h"
-int mgag200_modeset = -1;
+static int mgag200_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, mgag200_modeset, int, 0400);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 0f2dd26755df..af3ce5a6a636 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -642,6 +642,11 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a78662bd6273..6309a857ca31 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -21,7 +21,7 @@ config DRM_MSM
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_SCHED
- select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS if DRM_FBDEV_EMULATION
select SHMEM
select TMPFS
select QCOM_SCM
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 03fa89bf3e4b..cd73ee0cbf23 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -3,6 +3,8 @@
#include <linux/clk.h>
#include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 1245c7aa49df..4a2e479723a8 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 645927214871..5d9ec27c89d3 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -10,7 +10,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 3132105a2a43..60509fb39710 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -7,6 +7,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 9780107e1cc9..3e00fb8190b2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -3,7 +3,8 @@
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include "hdmi.h"
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 45d3d14909df..4bd028fa7500 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1086,10 +1086,7 @@ static const struct drm_driver msm_driver = {
.postclose = msm_postclose,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d9d5e009163..02fd6c7d0bb7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -286,7 +286,6 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t
void msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index b933a85420f6..030bedac632d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -25,9 +25,9 @@ module_param(fbdev, bool, 0600);
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
*/
-FB_GEN_DEFAULT_DEFERRED_SYS_OPS(msm_fbdev,
- drm_fb_helper_damage_range,
- drm_fb_helper_damage_area)
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(msm_fbdev,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area)
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
@@ -246,10 +246,6 @@ void msm_fbdev_setup(struct drm_device *dev)
goto err_drm_fb_helper_unprepare;
}
- ret = msm_fbdev_client_hotplug(&helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&helper->client);
return;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ce1ed0f9ad2d..db1e748daa75 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1239,6 +1239,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
@@ -1295,6 +1299,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto fail;
+
return obj;
fail:
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index c1d91863df05..5f68e31a3e4e 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -11,21 +11,6 @@
#include "msm_drv.h"
#include "msm_gem.h"
-int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- /* Ensure the mmap offset is initialized. We lazily initialize it,
- * so if it has not been first mmap'd directly as a GEM object, the
- * mmap offset will not be already initialized.
- */
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- return ret;
-
- return drm_gem_prime_mmap(obj, vma);
-}
-
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index be204b2ecf6a..2e87dd6cb17b 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -10,6 +10,8 @@
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index c9d8cbb21407..18de2f17e249 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -285,7 +284,7 @@ err_free:
return ret;
}
-static int lcdif_remove(struct platform_device *pdev)
+static void lcdif_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
@@ -293,8 +292,6 @@ static int lcdif_remove(struct platform_device *pdev)
drm_atomic_helper_shutdown(drm);
lcdif_unload(drm);
drm_dev_put(drm);
-
- return 0;
}
static void lcdif_shutdown(struct platform_device *pdev)
@@ -362,7 +359,7 @@ static const struct dev_pm_ops lcdif_pm_ops = {
static struct platform_driver lcdif_platform_driver = {
.probe = lcdif_probe,
- .remove = lcdif_remove,
+ .remove_new = lcdif_remove,
.shutdown = lcdif_shutdown,
.driver = {
.name = "imx-lcdif",
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 368b1fbd8305..625c1bfc4173 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -377,7 +377,7 @@ err_free:
return ret;
}
-static int mxsfb_remove(struct platform_device *pdev)
+static void mxsfb_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
@@ -385,8 +385,6 @@ static int mxsfb_remove(struct platform_device *pdev)
drm_atomic_helper_shutdown(drm);
mxsfb_unload(drm);
drm_dev_put(drm);
-
- return 0;
}
static void mxsfb_shutdown(struct platform_device *pdev)
@@ -418,7 +416,7 @@ static const struct dev_pm_ops mxsfb_pm_ops = {
static struct platform_driver mxsfb_platform_driver = {
.probe = mxsfb_probe,
- .remove = mxsfb_remove,
+ .remove_new = mxsfb_remove,
.shutdown = mxsfb_shutdown,
.driver = {
.name = "mxsfb",
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 3bcc9c0f2019..7ed2516b6de0 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -611,6 +611,14 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
}
+static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+
+ writel(0, mxsfb->base + LCDC_AS_CTRL);
+}
+
static bool mxsfb_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
@@ -626,6 +634,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_overlay_atomic_update,
+ .atomic_disable = mxsfb_plane_overlay_atomic_disable,
};
static const struct drm_plane_funcs mxsfb_plane_funcs = {
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 5e5617006da5..cf6b3a80c0c8 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -47,6 +47,9 @@ nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
nouveau-y += nouveau_ttm.o
nouveau-y += nouveau_vmm.o
+nouveau-y += nouveau_exec.o
+nouveau-y += nouveau_sched.o
+nouveau-y += nouveau_uvmm.o
# DRM - modesetting
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index a70bd65e1400..c52e8096cca4 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,8 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select DRM_EXEC
+ select DRM_SCHED
select I2C
select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a6f2e681bde9..a34924523133 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1122,11 +1122,18 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
- ret = nouveau_fence_new(chan, false, pfence);
+ ret = nouveau_fence_new(pfence);
if (ret)
goto fail;
+ ret = nouveau_fence_emit(*pfence, chan);
+ if (ret)
+ goto fail_fence_unref;
+
return 0;
+
+fail_fence_unref:
+ nouveau_fence_unref(pfence);
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 78ee32da01c8..a95ee5dcc2e3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -29,6 +29,7 @@
#include <nvhw/class/cl507a.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
bool
curs507a_space(struct nv50_wndw *wndw)
@@ -99,6 +100,7 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
{
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
struct nv50_head *head = nv50_head(asyw->state.crtc);
+ struct drm_framebuffer *fb = asyw->state.fb;
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
@@ -124,11 +126,30 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
return -EINVAL;
}
+ if (asyw->image.pitch[0] != asyw->image.w * fb->format->cpp[0]) {
+ NV_ATOMIC(drm,
+ "%s: invalid cursor image pitch: image must be packed (pitch = %d, width = %d)\n",
+ wndw->plane.name, asyw->image.pitch[0], asyw->image.w);
+ return -EINVAL;
+ }
+
ret = head->func->curs_layout(head, asyw, asyh);
- if (ret)
+ if (ret) {
+ NV_ATOMIC(drm,
+ "%s: invalid cursor image size: unsupported size %dx%d\n",
+ wndw->plane.name, asyw->image.w, asyw->image.h);
+ return ret;
+ }
+
+ ret = head->func->curs_format(head, asyw, asyh);
+ if (ret) {
+ NV_ATOMIC(drm,
+ "%s: invalid cursor image format 0x%X\n",
+ wndw->plane.name, fb->format->format);
return ret;
+ }
- return head->func->curs_format(head, asyw, asyh);
+ return 0;
}
static const u32
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 9b6824f6b9e4..4e7c9c353c51 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -910,15 +910,19 @@ nv50_msto_prepare(struct drm_atomic_state *state,
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
- struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_topology_state *old_mst_state;
+ struct drm_dp_mst_atomic_payload *payload, *old_payload;
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
+ old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
+
payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+ old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
if (msto->disabled) {
- drm_dp_remove_payload(mgr, mst_state, payload, payload);
+ drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
} else {
@@ -1124,7 +1128,7 @@ nv50_mstc_mode_valid(struct drm_connector *connector,
* MSTB's max possible PBN
*/
- return nv50_dp_mode_valid(connector, outp, mode, NULL);
+ return nv50_dp_mode_valid(outp, mode, NULL);
}
static int
@@ -1359,22 +1363,26 @@ nv50_mstm_service(struct nouveau_drm *drm,
u8 esi[8] = {};
while (handled) {
+ u8 ack[8] = {};
+
rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
if (rc != 8) {
ret = false;
break;
}
- drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
+ drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
if (!handled)
break;
- rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
- 3);
- if (rc != 3) {
+ rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
+
+ if (rc != 1) {
ret = false;
break;
}
+
+ drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
}
if (!ret)
@@ -1869,6 +1877,8 @@ nv50_pior_destroy(struct drm_encoder *encoder)
nvif_outp_dtor(&nv_encoder->outp);
drm_encoder_cleanup(encoder);
+
+ mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
kfree(encoder);
}
@@ -1913,6 +1923,8 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = ddc;
nv_encoder->aux = aux;
+ mutex_init(&nv_encoder->dp.hpd_irq_lock);
+
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index 9c7ff56831c5..a5a182b3c28d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -3,7 +3,10 @@
struct nvif_vmm_v0 {
__u8 version;
__u8 page_nr;
- __u8 managed;
+#define NVIF_VMM_V0_TYPE_UNMANAGED 0x00
+#define NVIF_VMM_V0_TYPE_MANAGED 0x01
+#define NVIF_VMM_V0_TYPE_RAW 0x02
+ __u8 type;
__u8 pad03[5];
__u64 addr;
__u64 size;
@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06
+#define NVIF_VMM_V0_RAW 0x07
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 {
@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
__u64 addr;
};
+struct nvif_vmm_raw_v0 {
+ __u8 version;
+#define NVIF_VMM_RAW_V0_GET 0x0
+#define NVIF_VMM_RAW_V0_PUT 0x1
+#define NVIF_VMM_RAW_V0_MAP 0x2
+#define NVIF_VMM_RAW_V0_UNMAP 0x3
+#define NVIF_VMM_RAW_V0_SPARSE 0x4
+ __u8 op;
+ __u8 sparse;
+ __u8 ref;
+ __u8 shift;
+ __u32 argc;
+ __u8 pad01[7];
+ __u64 addr;
+ __u64 size;
+ __u64 offset;
+ __u64 memory;
+ __u64 argv;
+};
+
struct nvif_vmm_pfnmap_v0 {
__u8 version;
__u8 page;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
index a2ee92201ace..0ecedd0ee0a5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/vmm.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -4,6 +4,12 @@
struct nvif_mem;
struct nvif_mmu;
+enum nvif_vmm_type {
+ UNMANAGED,
+ MANAGED,
+ RAW,
+};
+
enum nvif_vmm_get {
ADDR,
PTES,
@@ -30,8 +36,9 @@ struct nvif_vmm {
int page_nr;
};
-int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
- u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
+int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
+ enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *);
@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
+
+int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+ void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
+int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift, bool sparse);
+int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index b67b9c1a6b4e..738899fcf30b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -3,7 +3,7 @@
#define __NVKM_ENGINE_H__
#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
#include <core/subdev.h>
-struct nvkm_fifo_chan;
+struct nvkm_chan;
struct nvkm_fb_tile;
extern const struct nvkm_subdev_func nvkm_engine;
@@ -22,6 +22,7 @@ struct nvkm_engine_func {
int (*init)(struct nvkm_engine *);
int (*fini)(struct nvkm_engine *, bool suspend);
int (*reset)(struct nvkm_engine *);
+ int (*nonstall)(struct nvkm_engine *);
void (*intr)(struct nvkm_engine *);
void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
bool (*chsw_load)(struct nvkm_engine *);
@@ -32,8 +33,7 @@ struct nvkm_engine_func {
} base;
struct {
- int (*cclass)(struct nvkm_fifo_chan *,
- const struct nvkm_oclass *,
+ int (*cclass)(struct nvkm_chan *, const struct nvkm_oclass *,
struct nvkm_object **);
int (*sclass)(struct nvkm_oclass *, int index);
} fifo;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 4486d9862849..3fd5c007a663 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -49,9 +49,4 @@ nvkm_blob_dtor(struct nvkm_blob *blob)
(p = container_of((h), typeof(*p), m), nvkm_list_find_next(p, (h), m, (c)))
#define nvkm_list_foreach(p,h,m,c) \
for (p = nvkm_list_find(p, (h), m, (c)); p; p = nvkm_list_find_next(p, (h), m, (c)))
-
-/*FIXME: remove after */
-#define nvkm_fifo_chan nvkm_chan
-#define nvkm_fifo_chan_func nvkm_chan_func
-#define nvkm_fifo_cgrp nvkm_cgrp
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index cd86d9198e4a..b7bb8a29a729 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -3,7 +3,7 @@
#define __NVKM_FLCNEN_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
-struct nvkm_fifo_chan;
+struct nvkm_chan;
enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_UCODE = 0,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 01a22a13b452..1755b0df3cc1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -59,6 +59,7 @@ struct nvkm_fb {
struct nvkm_memory *mmu_wr;
};
+u64 nvkm_fb_vidmem_size(struct nvkm_device *);
int nvkm_fb_mem_unlock(struct nvkm_fb *);
void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index 40a1065ae626..ef441dfdea09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -16,7 +16,7 @@ struct nvkm_i2c_bus {
const struct nvkm_i2c_bus_func *func;
struct nvkm_i2c_pad *pad;
#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */ (n)
-#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x10)
#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */ -1
#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */ -2
int id;
@@ -38,7 +38,7 @@ struct nvkm_i2c_aux {
const struct nvkm_i2c_aux_func *func;
struct nvkm_i2c_pad *pad;
#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */ (n)
-#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x10)
int id;
struct mutex mutex;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 70e7887ef4b4..2fd2f2433fc7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
+ bool no_comp:1; /* Force no memory compression. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
};
@@ -27,10 +28,26 @@ struct nvkm_vmm {
const char *name;
u32 debug;
struct kref kref;
- struct mutex mutex;
+
+ struct {
+ struct mutex vmm;
+ struct mutex ref;
+ struct mutex map;
+ } mutex;
u64 start;
u64 limit;
+ struct {
+ struct {
+ u64 addr;
+ u64 size;
+ } p;
+ struct {
+ u64 addr;
+ u64 size;
+ } n;
+ bool raw;
+ } managed;
struct nvkm_vmm_pt *pd;
struct list_head join;
@@ -70,6 +87,7 @@ struct nvkm_vmm_map {
const struct nvkm_vmm_page *page;
+ bool no_comp;
struct nvkm_tags *tags;
u64 next;
u64 type;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 82dab51d8aeb..30afbec9e3b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -35,6 +35,7 @@
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
+#include "nouveau_sched.h"
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
@@ -125,6 +126,17 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
+ /* When a client exits without waiting for its queued up jobs to
+ * finish, it might happen that we fault the channel. This is due to
+ * drm_file_free() calling drm_gem_release() before the postclose()
+ * callback. Hence, we can't tear down this scheduler entity before
+ * uvmm mappings are unmapped. Currently, we can't detect this case.
+ *
+ * However, this should be rare and harmless, since the channel isn't
+ * needed anymore.
+ */
+ nouveau_sched_entity_fini(&chan->sched_entity);
+
/* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
@@ -261,6 +273,13 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV);
+ /* If uvmm wasn't initialized until now, disable it completely to prevent
+ * userspace from mixing up UAPIs.
+ *
+ * The client lock is already acquired by nouveau_abi16_get().
+ */
+ __nouveau_cli_disable_uvmm_noinit(cli);
+
device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
@@ -304,6 +323,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
+ ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
+ drm->sched_wq);
+ if (ret)
+ goto done;
+
init->channel = chan->chan->chid;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 27eae85f33e6..9f538486c10e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,6 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
+ struct nouveau_sched_entity sched_entity;
};
struct nouveau_abi16 {
@@ -43,28 +44,6 @@ int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
-struct drm_nouveau_channel_alloc {
- uint32_t fb_ctxdma_handle;
- uint32_t tt_ctxdma_handle;
-
- int channel;
- uint32_t pushbuf_domains;
-
- /* Notifier memory */
- uint32_t notifier_handle;
-
- /* DRM-enforced subchannel assignments */
- struct {
- uint32_t handle;
- uint32_t grclass;
- } subchan[8];
- uint32_t nr_subchan;
-};
-
-struct drm_nouveau_channel_free {
- int channel;
-};
-
struct drm_nouveau_grobj_alloc {
int channel;
uint32_t handle;
@@ -83,31 +62,12 @@ struct drm_nouveau_gpuobj_free {
uint32_t handle;
};
-#define NOUVEAU_GETPARAM_PCI_VENDOR 3
-#define NOUVEAU_GETPARAM_PCI_DEVICE 4
-#define NOUVEAU_GETPARAM_BUS_TYPE 5
-#define NOUVEAU_GETPARAM_FB_SIZE 8
-#define NOUVEAU_GETPARAM_AGP_SIZE 9
-#define NOUVEAU_GETPARAM_CHIPSET_ID 11
-#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
-#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
-#define NOUVEAU_GETPARAM_PTIMER_TIME 14
-#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
-#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-struct drm_nouveau_getparam {
- uint64_t param;
- uint64_t value;
-};
-
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
};
-#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8cf096f841a9..a2ae8c21e4dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -220,6 +220,9 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
int optimus_funcs;
struct pci_dev *parent_pdev;
+ if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ return;
+
*has_pr3 = false;
parent_pdev = pci_upstream_bridge(pdev);
if (parent_pdev) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c2ec91cc845d..19cab37ac69c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
- u32 tile_mode, u32 tile_flags)
+ u32 tile_mode, u32 tile_flags, bool internal)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
- struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+ struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
int i, pi = -1;
if (!*size) {
@@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return ERR_PTR(-ENOMEM);
+
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
@@ -232,68 +233,103 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo->force_coherent = true;
}
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
+ nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+ if (!nouveau_cli_uvmm(cli) || internal) {
+ /* for BO noVM allocs, don't assign kinds */
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
}
+ nvbo->mode = tile_mode;
+
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+ * during buffer migration, we need to determine page
+ * size for the buffer up-front, and pre-allocate its
+ * page tables.
+ *
+ * Skip page sizes that can't support needed domains.
+ */
+ if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+ (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
- } else
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
- nvbo->comp = (tile_flags & 0x00030000) >> 16;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ /* Select this page size if it's the first that supports
+ * the potential memory domains, or when it's compatible
+ * with the requested compression settings.
+ */
+ if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+ pi = i;
+
+ /* Stop once the buffer is larger than the current page size. */
+ if (*size >= 1ULL << vmm->page[i].shift)
+ break;
+ }
+
+ if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
- } else {
- nvbo->zeta = (tile_flags & 0x00000007);
- }
- nvbo->mode = tile_mode;
- nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-
- /* Determine the desirable target GPU page size for the buffer. */
- for (i = 0; i < vmm->page_nr; i++) {
- /* Because we cannot currently allow VMM maps to fail
- * during buffer migration, we need to determine page
- * size for the buffer up-front, and pre-allocate its
- * page tables.
- *
- * Skip page sizes that can't support needed domains.
- */
- if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
- (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
- continue;
- if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
- (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
- continue;
- /* Select this page size if it's the first that supports
- * the potential memory domains, or when it's compatible
- * with the requested compression settings.
- */
- if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
- pi = i;
-
- /* Stop once the buffer is larger than the current page size. */
- if (*size >= 1ULL << vmm->page[i].shift)
- break;
- }
+ /* Disable compression if suitable settings couldn't be found. */
+ if (nvbo->comp && !vmm->page[pi].comp) {
+ if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+ nvbo->kind = mmu->kind[nvbo->kind];
+ nvbo->comp = 0;
+ }
+ nvbo->page = vmm->page[pi].shift;
+ } else {
+ /* reject other tile flags when in VM mode. */
+ if (tile_mode)
+ return ERR_PTR(-EINVAL);
+ if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
+ return ERR_PTR(-EINVAL);
- if (WARN_ON(pi < 0)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+ * during buffer migration, we need to determine page
+ * size for the buffer up-front, and pre-allocate its
+ * page tables.
+ *
+ * Skip page sizes that can't support needed domains.
+ */
+ if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
- /* Disable compression if suitable settings couldn't be found. */
- if (nvbo->comp && !vmm->page[pi].comp) {
- if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
- nvbo->kind = mmu->kind[nvbo->kind];
- nvbo->comp = 0;
+ if (pi < 0)
+ pi = i;
+ /* Stop once the buffer is larger than the current page size. */
+ if (*size >= 1ULL << vmm->page[i].shift)
+ break;
+ }
+ if (WARN_ON(pi < 0)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+ nvbo->page = vmm->page[pi].shift;
}
- nvbo->page = vmm->page[pi].shift;
nouveau_bo_fixup_align(nvbo, align, size);
@@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ .resv = robj,
+ };
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
- ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
- &nvbo->placement, align >> PAGE_SHIFT, false,
+ ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
+ &nvbo->placement, align >> PAGE_SHIFT, &ctx,
sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ if (!robj)
+ ttm_bo_unreserve(&nvbo->bo);
+
return 0;
}
@@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
int ret;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
- tile_flags);
+ tile_flags, true);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
@@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
dma_resv_init(&nvbo->bo.base._resv);
drm_vma_node_reset(&nvbo->bo.base.vma_node);
+ /* This must be called before ttm_bo_init_reserved(). Subsequent
+ * bo_move() callbacks might already iterate the GEM's GPUVA list.
+ */
+ drm_gem_gpuva_init(&nvbo->bo.base);
+
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
@@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock(&cli->mutex);
else
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
- if (ret == 0) {
- ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
- if (ret == 0) {
- ret = nouveau_fence_new(chan, false, &fence);
- if (ret == 0) {
- /* TODO: figure out a better solution here
- *
- * wait on the fence here explicitly as going through
- * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
- *
- * Without this the operation can timeout and we'll fallback to a
- * software copy, which might take several minutes to finish.
- */
- nouveau_fence_wait(fence, false, false);
- ret = ttm_bo_move_accel_cleanup(bo,
- &fence->base,
- evict, false,
- new_reg);
- nouveau_fence_unref(&fence);
- }
- }
+ if (ret)
+ goto out_unlock;
+
+ ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = nouveau_fence_new(&fence);
+ if (ret)
+ goto out_unlock;
+
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret) {
+ nouveau_fence_unref(&fence);
+ goto out_unlock;
}
+
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+ * Without this the operation can timeout and we'll fallback to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
+ ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
+ new_reg);
+ nouveau_fence_unref(&fence);
+
+out_unlock:
mutex_unlock(&cli->mutex);
return ret;
}
@@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
list_for_each_entry(vma, &nvbo->vma_list, head) {
nouveau_vma_map(vma, mem);
}
+ nouveau_uvmm_bo_map_all(nvbo, mem);
} else {
list_for_each_entry(vma, &nvbo->vma_list, head) {
ret = dma_resv_wait_timeout(bo->base.resv,
@@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
WARN_ON(ret <= 0);
nouveau_vma_unmap(vma);
}
+ nouveau_uvmm_bo_unmap_all(nvbo);
}
if (new_reg)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 774dd93ca76b..07f671cf895e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,7 @@ struct nouveau_bo {
struct list_head entry;
int pbbo_index;
bool validate_mapped;
+ bool no_share;
/* GPU address space is independent of CPU word size */
uint64_t offset;
@@ -73,7 +74,7 @@ extern struct ttm_device_funcs nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
- u32 domain, u32 tile_mode, u32 tile_flags);
+ u32 domain, u32 tile_mode, u32 tile_flags, bool internal);
int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj);
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index e648ecd0c1a0..1fd5ccf41128 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -40,6 +40,14 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+void
+nouveau_channel_kill(struct nouveau_channel *chan)
+{
+ atomic_set(&chan->killed, 1);
+ if (chan->fence)
+ nouveau_fence_context_kill(chan->fence, -ENODEV);
+}
+
static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
@@ -47,9 +55,9 @@ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
- atomic_set(&chan->killed, 1);
- if (chan->fence)
- nouveau_fence_context_kill(chan->fence, -ENODEV);
+
+ if (unlikely(!atomic_read(&chan->killed)))
+ nouveau_channel_kill(chan);
return NVIF_EVENT_DROP;
}
@@ -62,9 +70,11 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, false, &fence);
+ ret = nouveau_fence_new(&fence);
if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
+ ret = nouveau_fence_emit(fence, chan);
+ if (!ret)
+ ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
@@ -90,6 +100,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
if (cli)
nouveau_svmm_part(chan->vmm->svmm, chan->inst);
+ nvif_object_dtor(&chan->blit);
nvif_object_dtor(&chan->nvsw);
nvif_object_dtor(&chan->gart);
nvif_object_dtor(&chan->vram);
@@ -148,7 +159,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->device = device;
chan->drm = drm;
- chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+ chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index e06a8ffed31a..5de2ef4e98c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -53,6 +53,7 @@ struct nouveau_channel {
u32 user_put;
struct nvif_object user;
+ struct nvif_object blit;
struct nvif_event kill;
atomic_t killed;
@@ -65,6 +66,7 @@ int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv,
u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
+void nouveau_channel_kill(struct nouveau_channel *);
extern int nouveau_vram_pushbuf;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 086b66b60d91..79ea30aac31f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -619,7 +619,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nouveau_connector_set_encoder(connector, nv_encoder);
conn_status = connector_status_connected;
- drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
+
goto out;
} else {
nouveau_connector_set_edid(nv_connector, NULL);
@@ -730,7 +733,8 @@ out:
#endif
nouveau_connector_set_edid(nv_connector, edid);
- nouveau_connector_set_encoder(connector, nv_encoder);
+ if (nv_encoder)
+ nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
@@ -987,7 +991,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
* "native" mode as some VBIOS tables require us to use the
* pixel clock as part of the lookup...
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
@@ -1078,7 +1082,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case DCB_OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
- return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
+ return nv50_dp_mode_valid(nv_encoder, mode, NULL);
default:
BUG();
return MODE_BAD;
@@ -1407,8 +1411,7 @@ nouveau_connector_create(struct drm_device *dev,
ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
&nv_connector->conn);
if (ret) {
- kfree(nv_connector);
- return ERR_PTR(ret);
+ goto drm_conn_err;
}
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
@@ -1425,8 +1428,7 @@ nouveau_connector_create(struct drm_device *dev,
if (ret) {
nvif_event_dtor(&nv_connector->hpd);
nvif_conn_dtor(&nv_connector->conn);
- kfree(nv_connector);
- return ERR_PTR(ret);
+ goto drm_conn_err;
}
}
}
@@ -1474,4 +1476,9 @@ nouveau_connector_create(struct drm_device *dev,
drm_connector_register(connector);
return connector;
+
+drm_conn_err:
+ drm_connector_cleanup(connector);
+ kfree(nv_connector);
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 99d022a91afc..053f703f2f68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -203,6 +203,44 @@ nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
}
+static void
+nouveau_debugfs_gpuva_regions(struct seq_file *m, struct nouveau_uvmm *uvmm)
+{
+ MA_STATE(mas, &uvmm->region_mt, 0, 0);
+ struct nouveau_uvma_region *reg;
+
+ seq_puts (m, " VA regions | start | range | end \n");
+ seq_puts (m, "----------------------------------------------------------------------------\n");
+ mas_for_each(&mas, reg, ULONG_MAX)
+ seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx\n",
+ reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
+}
+
+static int
+nouveau_debugfs_gpuva(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ struct nouveau_cli *cli;
+
+ mutex_lock(&drm->clients_lock);
+ list_for_each_entry(cli, &drm->clients, head) {
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+
+ if (!uvmm)
+ continue;
+
+ nouveau_uvmm_lock(uvmm);
+ drm_debugfs_gpuva_info(m, &uvmm->umgr);
+ seq_puts(m, "\n");
+ nouveau_debugfs_gpuva_regions(m, uvmm);
+ nouveau_uvmm_unlock(uvmm);
+ }
+ mutex_unlock(&drm->clients_lock);
+
+ return 0;
+}
+
static const struct file_operations nouveau_pstate_fops = {
.owner = THIS_MODULE,
.open = nouveau_debugfs_pstate_open,
@@ -214,6 +252,7 @@ static const struct file_operations nouveau_pstate_fops = {
static struct drm_info_list nouveau_debugfs_list[] = {
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
+ DRM_DEBUGFS_GPUVA_INFO(nouveau_debugfs_gpuva, NULL),
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ec3ffff487fc..99977e5fe716 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -465,7 +465,8 @@ nouveau_display_hpd_work(struct work_struct *work)
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u32 pending;
- bool changed = false;
+ int changed = 0;
+ struct drm_connector *first_changed_connector = NULL;
pm_runtime_get_sync(dev->dev);
@@ -509,7 +510,12 @@ nouveau_display_hpd_work(struct work_struct *work)
if (old_epoch_counter == connector->epoch_counter)
continue;
- changed = true;
+ changed++;
+ if (!first_changed_connector) {
+ drm_connector_get(connector);
+ first_changed_connector = connector;
+ }
+
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
connector->base.id, connector->name,
drm_get_connector_status_name(old_status),
@@ -520,9 +526,14 @@ nouveau_display_hpd_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- if (changed)
+ if (changed == 1)
+ drm_kms_helper_connector_hotplug_event(first_changed_connector);
+ else if (changed > 0)
drm_kms_helper_hotplug_event(dev);
+ if (first_changed_connector)
+ drm_connector_put(first_changed_connector);
+
pm_runtime_mark_last_busy(drm->dev->dev);
noop:
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 789857faa048..61e84562094a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
- nouveau_fence_new(dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
- nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
+ if (!nouveau_fence_new(&fence))
+ nouveau_fence_emit(fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index d49b4875fc3c..6a4980b2d4d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -267,8 +267,7 @@ nouveau_dp_irq(struct work_struct *work)
* yet)
*/
enum drm_mode_status
-nv50_dp_mode_valid(struct drm_connector *connector,
- struct nouveau_encoder *outp,
+nv50_dp_mode_valid(struct nouveau_encoder *outp,
const struct drm_display_mode *mode,
unsigned *out_clock)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index cc7c5b4a05fd..4396f501b16a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -68,6 +68,9 @@
#include "nouveau_platform.h"
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
+#include "nouveau_exec.h"
+#include "nouveau_uvmm.h"
+#include "nouveau_sched.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -137,10 +140,16 @@ nouveau_name(struct drm_device *dev)
static inline bool
nouveau_cli_work_ready(struct dma_fence *fence)
{
- if (!dma_fence_is_signaled(fence))
- return false;
- dma_fence_put(fence);
- return true;
+ bool ret = true;
+
+ spin_lock_irq(fence->lock);
+ if (!dma_fence_is_signaled_locked(fence))
+ ret = false;
+ spin_unlock_irq(fence->lock);
+
+ if (ret == true)
+ dma_fence_put(fence);
+ return ret;
}
static void
@@ -190,6 +199,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
+ nouveau_uvmm_fini(&cli->uvmm);
+ nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@@ -295,6 +306,12 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}
cli->mem = &mems[ret];
+
+ ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
+ drm->sched_wq);
+ if (ret)
+ goto done;
+
return 0;
done:
if (ret)
@@ -369,15 +386,29 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
NVDRM_NVSW, nouveau_abi16_swclass(drm),
NULL, 0, &drm->channel->nvsw);
+
+ if (ret == 0 && device->info.chipset >= 0x11) {
+ ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
+ 0x005f, 0x009f,
+ NULL, 0, &drm->channel->blit);
+ }
+
if (ret == 0) {
struct nvif_push *push = drm->channel->chan.push;
- ret = PUSH_WAIT(push, 2);
- if (ret == 0)
+ ret = PUSH_WAIT(push, 8);
+ if (ret == 0) {
+ if (device->info.chipset >= 0x11) {
+ PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
+ PUSH_NVSQ(push, NV09F, 0x0120, 0,
+ 0x0124, 1,
+ 0x0128, 2);
+ }
PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
+ }
}
if (ret) {
- NV_ERROR(drm, "failed to allocate sw class, %d\n", ret);
+ NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
nouveau_accel_gr_fini(drm);
return;
}
@@ -548,10 +579,14 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
- ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+ ret = nouveau_sched_init(drm);
if (ret)
goto fail_alloc;
+ ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+ if (ret)
+ goto fail_sched;
+
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
goto fail_master;
@@ -608,7 +643,6 @@ nouveau_drm_device_init(struct drm_device *dev)
}
return 0;
-
fail_dispinit:
nouveau_display_destroy(dev);
fail_dispctor:
@@ -621,6 +655,8 @@ fail_ttm:
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
+fail_sched:
+ nouveau_sched_fini(drm);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@@ -672,6 +708,8 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
+ nouveau_sched_fini(drm);
+
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
nvif_parent_dtor(&drm->parent);
@@ -1173,6 +1211,9 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_VM_INIT, nouveau_uvmm_ioctl_vm_init, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_VM_BIND, nouveau_uvmm_ioctl_vm_bind, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
};
long
@@ -1220,6 +1261,8 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features = DRIVER_GEM |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
+ DRIVER_GEM_GPUVA |
DRIVER_MODESET |
DRIVER_RENDER,
.open = nouveau_drm_open,
@@ -1234,10 +1277,7 @@ driver_stub = {
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index b5de312a523f..1fe17ff95f5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -10,8 +10,8 @@
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 4
+#define DRIVER_PATCHLEVEL 0
/*
* 1.1.1:
@@ -63,7 +63,9 @@ struct platform_device;
#include "nouveau_fence.h"
#include "nouveau_bios.h"
+#include "nouveau_sched.h"
#include "nouveau_vmm.h"
+#include "nouveau_uvmm.h"
struct nouveau_drm_tile {
struct nouveau_fence *fence;
@@ -91,6 +93,10 @@ struct nouveau_cli {
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
struct nouveau_vmm svm;
+ struct nouveau_uvmm uvmm;
+
+ struct nouveau_sched_entity sched_entity;
+
const struct nvif_mclass *mem;
struct list_head head;
@@ -112,6 +118,59 @@ struct nouveau_cli_work {
struct dma_fence_cb cb;
};
+static inline struct nouveau_uvmm *
+nouveau_cli_uvmm(struct nouveau_cli *cli)
+{
+ if (!cli || !cli->uvmm.vmm.cli)
+ return NULL;
+
+ return &cli->uvmm;
+}
+
+static inline struct nouveau_uvmm *
+nouveau_cli_uvmm_locked(struct nouveau_cli *cli)
+{
+ struct nouveau_uvmm *uvmm;
+
+ mutex_lock(&cli->mutex);
+ uvmm = nouveau_cli_uvmm(cli);
+ mutex_unlock(&cli->mutex);
+
+ return uvmm;
+}
+
+static inline struct nouveau_vmm *
+nouveau_cli_vmm(struct nouveau_cli *cli)
+{
+ struct nouveau_uvmm *uvmm;
+
+ uvmm = nouveau_cli_uvmm(cli);
+ if (uvmm)
+ return &uvmm->vmm;
+
+ if (cli->svm.cli)
+ return &cli->svm;
+
+ return &cli->vmm;
+}
+
+static inline void
+__nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+
+ if (!uvmm)
+ cli->uvmm.disabled = true;
+}
+
+static inline void
+nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
+{
+ mutex_lock(&cli->mutex);
+ __nouveau_cli_disable_uvmm_noinit(cli);
+ mutex_unlock(&cli->mutex);
+}
+
void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
struct nouveau_cli_work *);
@@ -121,6 +180,32 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
+static inline void
+u_free(void *addr)
+{
+ kvfree(addr);
+}
+
+static inline void *
+u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
+{
+ void *mem;
+ void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+ size *= nmemb;
+
+ mem = kvmalloc(size, GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(mem, userptr, size)) {
+ u_free(mem);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return mem;
+}
+
#include <nvif/object.h>
#include <nvif/parent.h>
@@ -222,6 +307,10 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
+
+ struct drm_gpu_scheduler sched;
+ struct workqueue_struct *sched_wq;
+
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 70c1ad6c4d9d..bcba1a14cfab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -143,8 +143,7 @@ enum nouveau_dp_status {
int nouveau_dp_detect(struct nouveau_connector *, struct nouveau_encoder *);
bool nouveau_dp_link_check(struct nouveau_connector *);
void nouveau_dp_irq(struct work_struct *);
-enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
- struct nouveau_encoder *,
+enum drm_mode_status nv50_dp_mode_valid(struct nouveau_encoder *,
const struct drm_display_mode *,
unsigned *clock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
new file mode 100644
index 000000000000..0f927adda4ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_exec.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_dma.h"
+#include "nouveau_exec.h"
+#include "nouveau_abi16.h"
+#include "nouveau_chan.h"
+#include "nouveau_sched.h"
+#include "nouveau_uvmm.h"
+
+/**
+ * DOC: Overview
+ *
+ * Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
+ * DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
+ *
+ * To use the UAPI, a user client must first initialize the VA space using the
+ * DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space should be
+ * managed by the kernel and which by the UMD.
+ *
+ * The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
+ * userspace-manageable portion of the VA space. It provides operations to map
+ * and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
+ * backed by a GEM object and the kernel will ignore GEM handles provided
+ * alongside a sparse mapping.
+ *
+ * Userspace may request memory backed mappings either within or outside of the
+ * bounds (but not crossing those bounds) of a previously mapped sparse
+ * mapping. Subsequently requested memory backed mappings within a sparse
+ * mapping will take precedence over the corresponding range of the sparse
+ * mapping. If such memory backed mappings are unmapped the kernel will make
+ * sure that the corresponding sparse mapping will take their place again.
+ * Requests to unmap a sparse mapping that still contains memory backed mappings
+ * will result in those memory backed mappings being unmapped first.
+ *
+ * Unmap requests are not bound to the range of existing mappings and can even
+ * overlap the bounds of sparse mappings. For such a request the kernel will
+ * make sure to unmap all memory backed mappings within the given range,
+ * splitting up memory backed mappings which are only partially contained
+ * within the given range. Unmap requests with the sparse flag set must match
+ * the range of a previously mapped sparse mapping exactly though.
+ *
+ * While the kernel generally permits memory backed mappings to be mapped and
+ * unmapped in arbitrary sequences and ranges, either within a single or across
+ * multiple VM_BIND ioctl calls, there are some restrictions for sparse mappings.
+ *
+ * The kernel does not permit userspace to:
+ * - unmap non-existent sparse mappings
+ * - unmap a sparse mapping and map a new sparse mapping overlapping the range
+ * of the previously unmapped sparse mapping within the same VM_BIND ioctl
+ * - unmap a sparse mapping and map new memory backed mappings overlapping the
+ * range of the previously unmapped sparse mapping within the same VM_BIND
+ * ioctl
+ *
+ * When using the VM_BIND ioctl to request the kernel to map memory to a given
+ * virtual address in the GPU's VA space, there is no guarantee that the actual
+ * mappings are created in the GPU's MMU. If the given memory is swapped out
+ * at the time the bind operation is executed, the kernel will stash the mapping
+ * details in its internal allocator and create the actual MMU mappings once
+ * the memory is swapped back in. While this is transparent to userspace, it is
+ * guaranteed that all the backing memory is swapped back in and all the memory
+ * mappings, as requested by userspace previously, are actually mapped once the
+ * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
+ *
+ * A VM_BIND job can be executed either synchronously or asynchronously. If
+ * executed asynchronously, userspace may provide a list of syncobjs this job
+ * will wait for and/or a list of syncobjs the kernel will signal once the
+ * VM_BIND job has finished execution. If executed synchronously, the ioctl will
+ * block until the bind job is finished. For synchronous jobs the kernel does
+ * not permit any syncobjs to be submitted.
+ *
+ * To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
+ * jobs are always executed asynchronously and, like VM_BIND jobs, provide
+ * the option to synchronize them with syncobjs.
+ *
+ * In addition, EXEC jobs can be scheduled on a specified channel.
+ *
+ * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs have an
+ * up-to-date view of the VA space. However, the actual mappings might still be
+ * pending. Hence, EXEC jobs require the fences of the corresponding VM_BIND
+ * jobs they depend on to be attached to them.
+ */
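As a rough illustration of the EXEC path documented above, here is a minimal userspace sketch. The struct drm_nouveau_exec fields (channel, push_ptr/push_count, wait_ptr/wait_count, sig_ptr/sig_count) are the ones consumed by nouveau_exec_ucopy() and nouveau_exec_ioctl_exec() in this file; the header path and the DRM_IOCTL_NOUVEAU_EXEC macro name are assumptions, and the handles are purely illustrative.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/nouveau_drm.h>    /* assumed to carry the new EXEC/VM_BIND UAPI */

    /* Submit a single push buffer segment on an already set-up channel, waiting
     * on one syncobj and signalling another once the job completes. The ioctl
     * macro name below is an assumption based on nouveau_ioctls[]. */
    static int submit_one_push(int fd, uint32_t channel,
                               const struct drm_nouveau_exec_push *push,
                               const struct drm_nouveau_sync *wait,
                               const struct drm_nouveau_sync *sig)
    {
            struct drm_nouveau_exec req;

            memset(&req, 0, sizeof(req));
            req.channel = channel;
            req.push_count = 1;
            req.push_ptr = (uintptr_t)push;
            req.wait_count = 1;
            req.wait_ptr = (uintptr_t)wait;
            req.sig_count = 1;
            req.sig_ptr = (uintptr_t)sig;

            return ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &req);
    }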
+
+static int
+nouveau_exec_job_submit(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ struct nouveau_cli *cli = job->cli;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+ struct drm_exec *exec = &job->exec;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ ret = nouveau_fence_new(&exec_job->fence);
+ if (ret)
+ return ret;
+
+ nouveau_uvmm_lock(uvmm);
+ drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(exec) {
+ struct drm_gpuva *va;
+
+ drm_gpuva_for_each_va(va, &uvmm->umgr) {
+ if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err_uvmm_unlock;
+ }
+ }
+ nouveau_uvmm_unlock(uvmm);
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+ ret = nouveau_bo_validate(nvbo, true, false);
+ if (ret)
+ goto err_exec_fini;
+ }
+
+ return 0;
+
+err_uvmm_unlock:
+ nouveau_uvmm_unlock(uvmm);
+err_exec_fini:
+ drm_exec_fini(exec);
+ return ret;
+
+}
+
+static void
+nouveau_exec_job_armed_submit(struct nouveau_job *job)
+{
+ struct drm_exec *exec = &job->exec;
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj)
+ dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
+
+ drm_exec_fini(exec);
+}
+
+static struct dma_fence *
+nouveau_exec_job_run(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ struct nouveau_channel *chan = exec_job->chan;
+ struct nouveau_fence *fence = exec_job->fence;
+ int i, ret;
+
+ ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+ if (ret) {
+ NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < exec_job->push.count; i++) {
+ nv50_dma_push(chan, exec_job->push.s[i].va,
+ exec_job->push.s[i].va_len);
+ }
+
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret) {
+ NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
+ WIND_RING(chan);
+ return ERR_PTR(ret);
+ }
+
+ exec_job->fence = NULL;
+
+ return &fence->base;
+}
+
+static void
+nouveau_exec_job_free(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+
+ nouveau_job_free(job);
+
+ nouveau_fence_unref(&exec_job->fence);
+ kfree(exec_job->push.s);
+ kfree(exec_job);
+}
+
+static enum drm_gpu_sched_stat
+nouveau_exec_job_timeout(struct nouveau_job *job)
+{
+ struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ struct nouveau_channel *chan = exec_job->chan;
+
+ if (unlikely(!atomic_read(&chan->killed)))
+ nouveau_channel_kill(chan);
+
+ NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
+ chan->chid);
+
+ nouveau_sched_entity_fini(job->entity);
+
+ return DRM_GPU_SCHED_STAT_ENODEV;
+}
+
+static struct nouveau_job_ops nouveau_exec_job_ops = {
+ .submit = nouveau_exec_job_submit,
+ .armed_submit = nouveau_exec_job_armed_submit,
+ .run = nouveau_exec_job_run,
+ .free = nouveau_exec_job_free,
+ .timeout = nouveau_exec_job_timeout,
+};
+
+int
+nouveau_exec_job_init(struct nouveau_exec_job **pjob,
+ struct nouveau_exec_job_args *__args)
+{
+ struct nouveau_exec_job *job;
+ struct nouveau_job_args args = {};
+ int ret;
+
+ job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ job->push.count = __args->push.count;
+ if (__args->push.count) {
+ job->push.s = kmemdup(__args->push.s,
+ sizeof(*__args->push.s) *
+ __args->push.count,
+ GFP_KERNEL);
+ if (!job->push.s) {
+ ret = -ENOMEM;
+ goto err_free_job;
+ }
+ }
+
+ job->chan = __args->chan;
+
+ args.sched_entity = __args->sched_entity;
+ args.file_priv = __args->file_priv;
+
+ args.in_sync.count = __args->in_sync.count;
+ args.in_sync.s = __args->in_sync.s;
+
+ args.out_sync.count = __args->out_sync.count;
+ args.out_sync.s = __args->out_sync.s;
+
+ args.ops = &nouveau_exec_job_ops;
+ args.resv_usage = DMA_RESV_USAGE_WRITE;
+
+ ret = nouveau_job_init(&job->base, &args);
+ if (ret)
+ goto err_free_pushs;
+
+ return 0;
+
+err_free_pushs:
+ kfree(job->push.s);
+err_free_job:
+ kfree(job);
+ *pjob = NULL;
+
+ return ret;
+}
+
+static int
+nouveau_exec(struct nouveau_exec_job_args *args)
+{
+ struct nouveau_exec_job *job;
+ int ret;
+
+ ret = nouveau_exec_job_init(&job, args);
+ if (ret)
+ return ret;
+
+ ret = nouveau_job_submit(&job->base);
+ if (ret)
+ goto err_job_fini;
+
+ return 0;
+
+err_job_fini:
+ nouveau_job_fini(&job->base);
+ return ret;
+}
+
+static int
+nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
+ struct drm_nouveau_exec *req)
+{
+ struct drm_nouveau_sync **s;
+ u32 inc = req->wait_count;
+ u64 ins = req->wait_ptr;
+ u32 outc = req->sig_count;
+ u64 outs = req->sig_ptr;
+ u32 pushc = req->push_count;
+ u64 pushs = req->push_ptr;
+ int ret;
+
+ if (pushc) {
+ args->push.count = pushc;
+ args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
+ if (IS_ERR(args->push.s))
+ return PTR_ERR(args->push.s);
+ }
+
+ if (inc) {
+ s = &args->in_sync.s;
+
+ args->in_sync.count = inc;
+ *s = u_memcpya(ins, inc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_pushs;
+ }
+ }
+
+ if (outc) {
+ s = &args->out_sync.s;
+
+ args->out_sync.count = outc;
+ *s = u_memcpya(outs, outc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_ins;
+ }
+ }
+
+ return 0;
+
+err_free_pushs:
+ u_free(args->push.s);
+err_free_ins:
+ u_free(args->in_sync.s);
+ return ret;
+}
+
+static void
+nouveau_exec_ufree(struct nouveau_exec_job_args *args)
+{
+ u_free(args->push.s);
+ u_free(args->in_sync.s);
+ u_free(args->out_sync.s);
+}
+
+int
+nouveau_exec_ioctl_exec(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_abi16_chan *chan16;
+ struct nouveau_channel *chan = NULL;
+ struct nouveau_exec_job_args args = {};
+ struct drm_nouveau_exec *req = data;
+ int ret = 0;
+
+ if (unlikely(!abi16))
+ return -ENOMEM;
+
+ /* abi16 locks already */
+ if (unlikely(!nouveau_cli_uvmm(cli)))
+ return nouveau_abi16_put(abi16, -ENOSYS);
+
+ list_for_each_entry(chan16, &abi16->channels, head) {
+ if (chan16->chan->chid == req->channel) {
+ chan = chan16->chan;
+ break;
+ }
+ }
+
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+
+ if (unlikely(atomic_read(&chan->killed)))
+ return nouveau_abi16_put(abi16, -ENODEV);
+
+ if (!chan->dma.ib_max)
+ return nouveau_abi16_put(abi16, -ENOSYS);
+
+ if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
+ NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
+ req->push_count, NOUVEAU_GEM_MAX_PUSH);
+ return nouveau_abi16_put(abi16, -EINVAL);
+ }
+
+ ret = nouveau_exec_ucopy(&args, req);
+ if (ret)
+ goto out;
+
+ args.sched_entity = &chan16->sched_entity;
+ args.file_priv = file_priv;
+ args.chan = chan;
+
+ ret = nouveau_exec(&args);
+ if (ret)
+ goto out_free_args;
+
+out_free_args:
+ nouveau_exec_ufree(&args);
+out:
+ return nouveau_abi16_put(abi16, ret);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
new file mode 100644
index 000000000000..778cacd90f65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOUVEAU_EXEC_H__
+#define __NOUVEAU_EXEC_H__
+
+#include <drm/drm_exec.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_sched.h"
+
+struct nouveau_exec_job_args {
+ struct drm_file *file_priv;
+ struct nouveau_sched_entity *sched_entity;
+
+ struct drm_exec exec;
+ struct nouveau_channel *chan;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } out_sync;
+
+ struct {
+ struct drm_nouveau_exec_push *s;
+ u32 count;
+ } push;
+};
+
+struct nouveau_exec_job {
+ struct nouveau_job base;
+ struct nouveau_fence *fence;
+ struct nouveau_channel *chan;
+
+ struct {
+ struct drm_nouveau_exec_push *s;
+ u32 count;
+ } push;
+};
+
+#define to_nouveau_exec_job(job) \
+ container_of((job), struct nouveau_exec_job, base)
+
+int nouveau_exec_job_init(struct nouveau_exec_job **job,
+ struct nouveau_exec_job_args *args);
+
+int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ee5e9d40c166..77c739a55b19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -96,6 +96,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
if (nouveau_fence_signal(fence))
nvif_event_block(&fctx->event);
}
+ fctx->killed = 1;
spin_unlock_irqrestore(&fctx->lock, flags);
}
@@ -210,6 +211,9 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
+ if (unlikely(!chan->fence))
+ return -ENODEV;
+
fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
@@ -226,6 +230,12 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
+ if (unlikely(fctx->killed)) {
+ spin_unlock_irq(&fctx->lock);
+ dma_fence_put(&fence->base);
+ return -ENODEV;
+ }
+
if (nouveau_fence_update(chan, fctx))
nvif_event_block(&fctx->event);
@@ -396,25 +406,16 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
}
int
-nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
- struct nouveau_fence **pfence)
+nouveau_fence_new(struct nouveau_fence **pfence)
{
struct nouveau_fence *fence;
- int ret = 0;
-
- if (unlikely(!chan->fence))
- return -ENODEV;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
- ret = nouveau_fence_emit(fence, chan);
- if (ret)
- nouveau_fence_unref(&fence);
-
*pfence = fence;
- return ret;
+ return 0;
}
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 0ca2bc85adf6..2c72d96ef17d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,8 +17,7 @@ struct nouveau_fence {
unsigned long timeout;
};
-int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
- struct nouveau_fence **);
+int nouveau_fence_new(struct nouveau_fence **);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
@@ -45,7 +44,7 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_event event;
- int notify_ref, dead;
+ int notify_ref, dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ab9062e50977..f39360870c70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -103,13 +103,17 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
+ if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+ return -EPERM;
+
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
@@ -120,7 +124,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
- ret = nouveau_vma_new(nvbo, vmm, &vma);
+ /* only create a VMA on binding */
+ if (!nouveau_cli_uvmm(cli))
+ ret = nouveau_vma_new(nvbo, vmm, &vma);
+ else
+ ret = 0;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
@@ -180,13 +188,16 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : & cli->vmm;
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
+ if (nouveau_cli_uvmm(cli))
+ return;
+
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
@@ -209,6 +220,7 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
.free = nouveau_gem_object_del,
.open = nouveau_gem_object_open,
.close = nouveau_gem_object_close,
+ .export = nouveau_gem_prime_export,
.pin = nouveau_gem_prime_pin,
.unpin = nouveau_gem_prime_unpin,
.get_sg_table = nouveau_gem_prime_get_sg_table,
@@ -224,18 +236,28 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
struct nouveau_bo **pnvbo)
{
struct nouveau_drm *drm = cli->drm;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
+ struct dma_resv *resv = NULL;
struct nouveau_bo *nvbo;
int ret;
+ if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
+ if (unlikely(!uvmm))
+ return -EINVAL;
+
+ resv = &uvmm->resv;
+ }
+
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
domain |= NOUVEAU_GEM_DOMAIN_CPU;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
- tile_flags);
+ tile_flags, false);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+ nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
@@ -246,7 +268,14 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
return ret;
}
- ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
+ if (resv)
+ dma_resv_lock(resv, NULL);
+
+ ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
+
+ if (resv)
+ dma_resv_unlock(resv);
+
if (ret)
return ret;
@@ -269,7 +298,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
@@ -279,13 +308,15 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->offset;
- if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
+ !nouveau_cli_uvmm(cli)) {
vma = nouveau_vma_find(nvbo, vmm);
if (!vma)
return -EINVAL;
rep->offset = vma->addr;
- }
+ } else
+ rep->offset = 0;
rep->size = nvbo->bo.base.size;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
@@ -310,6 +341,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
+	/* If uvmm wasn't initialized by now, disable it completely to prevent
+ * userspace from mixing up UAPIs.
+ */
+ nouveau_cli_disable_uvmm_noinit(cli);
+
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
@@ -613,32 +649,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
}
-static inline void
-u_free(void *addr)
-{
- kvfree(addr);
-}
-
-static inline void *
-u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
-{
- void *mem;
- void __user *userptr = (void __force __user *)(uintptr_t)user;
-
- size *= nmemb;
-
- mem = kvmalloc(size, GFP_KERNEL);
- if (!mem)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(mem, userptr, size)) {
- u_free(mem);
- return ERR_PTR(-EFAULT);
- }
-
- return mem;
-}
-
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
@@ -747,6 +757,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(!abi16))
return -ENOMEM;
+ if (unlikely(nouveau_cli_uvmm(cli)))
+ return -ENOSYS;
+
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
chan = temp->chan;
@@ -899,8 +912,11 @@ revalidate:
}
}
- ret = nouveau_fence_new(chan, false, &fence);
+ ret = nouveau_fence_new(&fence);
+ if (!ret)
+ ret = nouveau_fence_emit(fence, chan);
if (ret) {
+ nouveau_fence_unref(&fence);
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 3b919c7c931c..10814d446435 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -37,5 +37,6 @@ extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
-
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+ int flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 76c86d8bb01e..5365a3d3a17f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -35,4 +35,9 @@ int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+int
+nouveau_mem_map_fixed(struct nouveau_mem *mem,
+ struct nvif_vmm *vmm,
+ u8 kind, u64 addr,
+ u64 offset, u64 range);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index f42c2b1b0363..1b2ff0c40fc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -50,7 +50,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
dma_resv_lock(robj, NULL);
nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
- NOUVEAU_GEM_DOMAIN_GART, 0, 0);
+ NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
@@ -102,3 +102,14 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
nouveau_bo_unpin(nvbo);
}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+ int flags)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+
+ if (nvbo->no_share)
+ return ERR_PTR(-EPERM);
+
+ return drm_gem_prime_export(gobj, flags);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
new file mode 100644
index 000000000000..3424a1bf6af3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/slab.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_syncobj.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_dma.h"
+#include "nouveau_exec.h"
+#include "nouveau_abi16.h"
+#include "nouveau_sched.h"
+
+/* FIXME
+ *
+ * We want to make sure that jobs currently executing can't be deferred by
+ * other jobs competing for the hardware. Otherwise we might end up with job
+ * timeouts just because of too many clients submitting too many jobs. We don't
+ * want jobs to time out because of system load, but only because a job itself
+ * is too bulky.
+ *
+ * For now allow for up to 16 concurrent jobs in flight until we know how many
+ * rings the hardware can process in parallel.
+ */
+#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
+#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
+
+int
+nouveau_job_init(struct nouveau_job *job,
+ struct nouveau_job_args *args)
+{
+ struct nouveau_sched_entity *entity = args->sched_entity;
+ int ret;
+
+ job->file_priv = args->file_priv;
+ job->cli = nouveau_cli(args->file_priv);
+ job->entity = entity;
+
+ job->sync = args->sync;
+ job->resv_usage = args->resv_usage;
+
+ job->ops = args->ops;
+
+ job->in_sync.count = args->in_sync.count;
+ if (job->in_sync.count) {
+ if (job->sync)
+ return -EINVAL;
+
+ job->in_sync.data = kmemdup(args->in_sync.s,
+ sizeof(*args->in_sync.s) *
+ args->in_sync.count,
+ GFP_KERNEL);
+ if (!job->in_sync.data)
+ return -ENOMEM;
+ }
+
+ job->out_sync.count = args->out_sync.count;
+ if (job->out_sync.count) {
+ if (job->sync) {
+ ret = -EINVAL;
+ goto err_free_in_sync;
+ }
+
+ job->out_sync.data = kmemdup(args->out_sync.s,
+ sizeof(*args->out_sync.s) *
+ args->out_sync.count,
+ GFP_KERNEL);
+ if (!job->out_sync.data) {
+ ret = -ENOMEM;
+ goto err_free_in_sync;
+ }
+
+ job->out_sync.objs = kcalloc(job->out_sync.count,
+ sizeof(*job->out_sync.objs),
+ GFP_KERNEL);
+ if (!job->out_sync.objs) {
+ ret = -ENOMEM;
+ goto err_free_out_sync;
+ }
+
+ job->out_sync.chains = kcalloc(job->out_sync.count,
+ sizeof(*job->out_sync.chains),
+ GFP_KERNEL);
+ if (!job->out_sync.chains) {
+ ret = -ENOMEM;
+ goto err_free_objs;
+ }
+
+ }
+
+ ret = drm_sched_job_init(&job->base, &entity->base, NULL);
+ if (ret)
+ goto err_free_chains;
+
+ job->state = NOUVEAU_JOB_INITIALIZED;
+
+ return 0;
+
+err_free_chains:
+ kfree(job->out_sync.chains);
+err_free_objs:
+ kfree(job->out_sync.objs);
+err_free_out_sync:
+ kfree(job->out_sync.data);
+err_free_in_sync:
+ kfree(job->in_sync.data);
+	return ret;
+}
+
+void
+nouveau_job_free(struct nouveau_job *job)
+{
+ kfree(job->in_sync.data);
+ kfree(job->out_sync.data);
+ kfree(job->out_sync.objs);
+ kfree(job->out_sync.chains);
+}
+
+void nouveau_job_fini(struct nouveau_job *job)
+{
+ dma_fence_put(job->done_fence);
+ drm_sched_job_cleanup(&job->base);
+ job->ops->free(job);
+}
+
+static int
+sync_find_fence(struct nouveau_job *job,
+ struct drm_nouveau_sync *sync,
+ struct dma_fence **fence)
+{
+ u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+ u64 point = 0;
+ int ret;
+
+ if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
+ stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
+ return -EOPNOTSUPP;
+
+ if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
+ point = sync->timeline_value;
+
+ ret = drm_syncobj_find_fence(job->file_priv,
+ sync->handle, point,
+ 0 /* flags */, fence);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nouveau_job_add_deps(struct nouveau_job *job)
+{
+ struct dma_fence *in_fence = NULL;
+ int ret, i;
+
+ for (i = 0; i < job->in_sync.count; i++) {
+ struct drm_nouveau_sync *sync = &job->in_sync.data[i];
+
+ ret = sync_find_fence(job, sync, &in_fence);
+ if (ret) {
+ NV_PRINTK(warn, job->cli,
+ "Failed to find syncobj (-> in): handle=%d\n",
+ sync->handle);
+ return ret;
+ }
+
+ ret = drm_sched_job_add_dependency(&job->base, in_fence);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->out_sync.count; i++) {
+ struct drm_syncobj *obj = job->out_sync.objs[i];
+ struct dma_fence_chain *chain = job->out_sync.chains[i];
+
+ if (obj)
+ drm_syncobj_put(obj);
+
+ if (chain)
+ dma_fence_chain_free(chain);
+ }
+}
+
+static int
+nouveau_job_fence_attach_prepare(struct nouveau_job *job)
+{
+ int i, ret;
+
+ for (i = 0; i < job->out_sync.count; i++) {
+ struct drm_nouveau_sync *sync = &job->out_sync.data[i];
+ struct drm_syncobj **pobj = &job->out_sync.objs[i];
+ struct dma_fence_chain **pchain = &job->out_sync.chains[i];
+ u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+
+ if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
+ stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+ ret = -EINVAL;
+ goto err_sync_cleanup;
+ }
+
+ *pobj = drm_syncobj_find(job->file_priv, sync->handle);
+ if (!*pobj) {
+ NV_PRINTK(warn, job->cli,
+ "Failed to find syncobj (-> out): handle=%d\n",
+ sync->handle);
+ ret = -ENOENT;
+ goto err_sync_cleanup;
+ }
+
+ if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+ *pchain = dma_fence_chain_alloc();
+ if (!*pchain) {
+ ret = -ENOMEM;
+ goto err_sync_cleanup;
+ }
+ }
+ }
+
+ return 0;
+
+err_sync_cleanup:
+ nouveau_job_fence_attach_cleanup(job);
+ return ret;
+}
+
+static void
+nouveau_job_fence_attach(struct nouveau_job *job)
+{
+ struct dma_fence *fence = job->done_fence;
+ int i;
+
+ for (i = 0; i < job->out_sync.count; i++) {
+ struct drm_nouveau_sync *sync = &job->out_sync.data[i];
+ struct drm_syncobj **pobj = &job->out_sync.objs[i];
+ struct dma_fence_chain **pchain = &job->out_sync.chains[i];
+ u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
+
+ if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
+ drm_syncobj_add_point(*pobj, *pchain, fence,
+ sync->timeline_value);
+ } else {
+ drm_syncobj_replace_fence(*pobj, fence);
+ }
+
+ drm_syncobj_put(*pobj);
+ *pobj = NULL;
+ *pchain = NULL;
+ }
+}
+
+int
+nouveau_job_submit(struct nouveau_job *job)
+{
+ struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+ struct dma_fence *done_fence = NULL;
+ int ret;
+
+ ret = nouveau_job_add_deps(job);
+ if (ret)
+ goto err;
+
+ ret = nouveau_job_fence_attach_prepare(job);
+ if (ret)
+ goto err;
+
+ /* Make sure the job appears on the sched_entity's queue in the same
+ * order as it was submitted.
+ */
+ mutex_lock(&entity->mutex);
+
+ /* Guarantee we won't fail after the submit() callback returned
+ * successfully.
+ */
+ if (job->ops->submit) {
+ ret = job->ops->submit(job);
+ if (ret)
+ goto err_cleanup;
+ }
+
+ drm_sched_job_arm(&job->base);
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+ if (job->sync)
+ done_fence = dma_fence_get(job->done_fence);
+
+ if (job->ops->armed_submit)
+ job->ops->armed_submit(job);
+
+ nouveau_job_fence_attach(job);
+
+ /* Set job state before pushing the job to the scheduler,
+ * such that we do not overwrite the job state set in run().
+ */
+ job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;
+
+ drm_sched_entity_push_job(&job->base);
+
+ mutex_unlock(&entity->mutex);
+
+ if (done_fence) {
+ dma_fence_wait(done_fence, true);
+ dma_fence_put(done_fence);
+ }
+
+ return 0;
+
+err_cleanup:
+ mutex_unlock(&entity->mutex);
+ nouveau_job_fence_attach_cleanup(job);
+err:
+ job->state = NOUVEAU_JOB_SUBMIT_FAILED;
+ return ret;
+}
+
+bool
+nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
+ struct work_struct *work)
+{
+ return queue_work(entity->sched_wq, work);
+}
+
+static struct dma_fence *
+nouveau_job_run(struct nouveau_job *job)
+{
+ struct dma_fence *fence;
+
+ fence = job->ops->run(job);
+ if (IS_ERR(fence))
+ job->state = NOUVEAU_JOB_RUN_FAILED;
+ else
+ job->state = NOUVEAU_JOB_RUN_SUCCESS;
+
+ return fence;
+}
+
+static struct dma_fence *
+nouveau_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_job *job = to_nouveau_job(sched_job);
+
+ return nouveau_job_run(job);
+}
+
+static enum drm_gpu_sched_stat
+nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_job *job = to_nouveau_job(sched_job);
+
+ NV_PRINTK(warn, job->cli, "Job timed out.\n");
+
+ if (job->ops->timeout)
+ return job->ops->timeout(job);
+
+ return DRM_GPU_SCHED_STAT_ENODEV;
+}
+
+static void
+nouveau_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct nouveau_job *job = to_nouveau_job(sched_job);
+
+ nouveau_job_fini(job);
+}
+
+int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
+ struct drm_gpu_scheduler *sched,
+ struct workqueue_struct *sched_wq)
+{
+ mutex_init(&entity->mutex);
+ spin_lock_init(&entity->job.list.lock);
+ INIT_LIST_HEAD(&entity->job.list.head);
+ init_waitqueue_head(&entity->job.wq);
+
+ entity->sched_wq = sched_wq;
+ return drm_sched_entity_init(&entity->base,
+ DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+}
+
+void
+nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static const struct drm_sched_backend_ops nouveau_sched_ops = {
+ .run_job = nouveau_sched_run_job,
+ .timedout_job = nouveau_sched_timedout_job,
+ .free_job = nouveau_sched_free_job,
+};
+
+int nouveau_sched_init(struct nouveau_drm *drm)
+{
+ struct drm_gpu_scheduler *sched = &drm->sched;
+ long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+
+ drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
+ if (!drm->sched_wq)
+ return -ENOMEM;
+
+ return drm_sched_init(sched, &nouveau_sched_ops,
+ NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
+ NULL, NULL, "nouveau_sched", drm->dev->dev);
+}
+
+void nouveau_sched_fini(struct nouveau_drm *drm)
+{
+ destroy_workqueue(drm->sched_wq);
+ drm_sched_fini(&drm->sched);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
new file mode 100644
index 000000000000..27ac19792597
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef NOUVEAU_SCHED_H
+#define NOUVEAU_SCHED_H
+
+#include <linux/types.h>
+
+#include <drm/drm_exec.h>
+#include <drm/gpu_scheduler.h>
+
+#include "nouveau_drv.h"
+
+#define to_nouveau_job(sched_job) \
+ container_of((sched_job), struct nouveau_job, base)
+
+struct nouveau_job_ops;
+
+enum nouveau_job_state {
+ NOUVEAU_JOB_UNINITIALIZED = 0,
+ NOUVEAU_JOB_INITIALIZED,
+ NOUVEAU_JOB_SUBMIT_SUCCESS,
+ NOUVEAU_JOB_SUBMIT_FAILED,
+ NOUVEAU_JOB_RUN_SUCCESS,
+ NOUVEAU_JOB_RUN_FAILED,
+};
+
+struct nouveau_job_args {
+ struct drm_file *file_priv;
+ struct nouveau_sched_entity *sched_entity;
+
+ enum dma_resv_usage resv_usage;
+ bool sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } out_sync;
+
+ struct nouveau_job_ops *ops;
+};
+
+struct nouveau_job {
+ struct drm_sched_job base;
+
+ enum nouveau_job_state state;
+
+ struct nouveau_sched_entity *entity;
+
+ struct drm_file *file_priv;
+ struct nouveau_cli *cli;
+
+ struct drm_exec exec;
+ enum dma_resv_usage resv_usage;
+ struct dma_fence *done_fence;
+
+ bool sync;
+
+ struct {
+ struct drm_nouveau_sync *data;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *data;
+ struct drm_syncobj **objs;
+ struct dma_fence_chain **chains;
+ u32 count;
+ } out_sync;
+
+ struct nouveau_job_ops {
+ /* If .submit() returns without any error, it is guaranteed that
+ * armed_submit() is called.
+ */
+ int (*submit)(struct nouveau_job *);
+ void (*armed_submit)(struct nouveau_job *);
+ struct dma_fence *(*run)(struct nouveau_job *);
+ void (*free)(struct nouveau_job *);
+ enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
+ } *ops;
+};
+
+int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
+ u32 inc, u64 ins,
+ u32 outc, u64 outs);
+
+int nouveau_job_init(struct nouveau_job *job,
+ struct nouveau_job_args *args);
+void nouveau_job_free(struct nouveau_job *job);
+
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_fini(struct nouveau_job *job);
+
+#define to_nouveau_sched_entity(entity) \
+ container_of((entity), struct nouveau_sched_entity, base)
+
+struct nouveau_sched_entity {
+ struct drm_sched_entity base;
+ struct mutex mutex;
+
+ struct workqueue_struct *sched_wq;
+
+ struct {
+ struct {
+ struct list_head head;
+ spinlock_t lock;
+ } list;
+ struct wait_queue_head wq;
+ } job;
+};
+
+int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
+ struct drm_gpu_scheduler *sched,
+ struct workqueue_struct *sched_wq);
+void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
+
+bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
+ struct work_struct *work);
+
+int nouveau_sched_init(struct nouveau_drm *drm);
+void nouveau_sched_fini(struct nouveau_drm *drm);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index a74ba8d84ba7..186351ecf72f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
* VMM instead of the standard one.
*/
ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
- cli->vmm.vmm.object.oclass, true,
+ cli->vmm.vmm.object.oclass, MANAGED,
args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) {
.fault_replay = true,
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1469a88910e4..486f39f31a38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,9 +24,9 @@
*/
#include <linux/limits.h>
-#include <linux/swiotlb.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_cache.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
@@ -265,7 +265,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
- bool need_swiotlb = false;
int typei, ret;
ret = nouveau_ttm_init_host(drm, 0);
@@ -300,13 +299,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = is_swiotlb_active(dev->dev);
-#endif
-
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
dev->anon_inode->i_mapping,
- dev->vma_offset_manager, need_swiotlb,
+ dev->vma_offset_manager,
+ drm_need_swiotlb(drm->client.mmu.dmabits),
drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
new file mode 100644
index 000000000000..3a1e8538f205
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -0,0 +1,1916 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Locking:
+ *
+ * The uvmm mutex protects any operations on the GPU VA space provided by the
+ * DRM GPU VA manager.
+ *
+ * The GEM's dma_resv lock protects the GEM's GPUVA list, hence link/unlink of a
+ * mapping to its backing GEM must be performed under this lock.
+ *
+ * Actual map/unmap operations within the fence signalling critical path are
+ * protected by installing DMA fences to the corresponding GEMs DMA
+ * reservations, such that concurrent BO moves, which themselves walk the GEM's
+ * GPUVA list in order to map/unmap its entries, can't run concurrently.
+ *
+ * Accessing the DRM_GPUVA_INVALIDATED flag doesn't need any separate
+ * protection, since there are no accesses other than from BO move callbacks
+ * and from the fence signalling critical path, which are already protected by
+ * the corresponding GEM's DMA reservation fence.
+ */
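To make the rules above concrete, here is a short sketch of the two lock domains, assuming the helpers used elsewhere in this patch (nouveau_uvmm_lock()/nouveau_uvmm_unlock(), drm_gpuva_for_each_va(), drm_gpuva_link()); the function bodies are illustrative only and not part of the patch.

    /* Walking the GPU VA space is serialized by the uvmm mutex, mirroring how
     * nouveau_exec_job_submit() iterates mappings before locking their GEMs. */
    static void example_walk_va_space(struct nouveau_uvmm *uvmm)
    {
            struct drm_gpuva *va;

            nouveau_uvmm_lock(uvmm);
            drm_gpuva_for_each_va(va, &uvmm->umgr) {
                    /* inspect or collect mappings; no MMU map/unmap here */
            }
            nouveau_uvmm_unlock(uvmm);
    }

    /* The GEM's GPUVA list is protected by the GEM's dma_resv, so linking a
     * mapping to its backing GEM happens under that lock. */
    static void example_link_mapping(struct drm_gpuva *va)
    {
            struct drm_gem_object *obj = va->gem.obj;

            if (dma_resv_lock(obj->resv, NULL))
                    return;
            drm_gpuva_link(va);
            dma_resv_unlock(obj->resv);
    }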
+
+#include "nouveau_drv.h"
+#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_uvmm.h"
+
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/class.h>
+#include <nvif/if000c.h>
+#include <nvif/if900d.h>
+
+#define NOUVEAU_VA_SPACE_BITS 47 /* FIXME */
+#define NOUVEAU_VA_SPACE_START 0x0
+#define NOUVEAU_VA_SPACE_END (1ULL << NOUVEAU_VA_SPACE_BITS)
+
+#define list_last_op(_ops) list_last_entry(_ops, struct bind_job_op, entry)
+#define list_prev_op(_op) list_prev_entry(_op, entry)
+#define list_for_each_op(_op, _ops) list_for_each_entry(_op, _ops, entry)
+#define list_for_each_op_from_reverse(_op, _ops) \
+ list_for_each_entry_from_reverse(_op, _ops, entry)
+#define list_for_each_op_safe(_op, _n, _ops) list_for_each_entry_safe(_op, _n, _ops, entry)
+
+enum vm_bind_op {
+ OP_MAP = DRM_NOUVEAU_VM_BIND_OP_MAP,
+ OP_UNMAP = DRM_NOUVEAU_VM_BIND_OP_UNMAP,
+ OP_MAP_SPARSE,
+ OP_UNMAP_SPARSE,
+};
+
+struct nouveau_uvma_prealloc {
+ struct nouveau_uvma *map;
+ struct nouveau_uvma *prev;
+ struct nouveau_uvma *next;
+};
+
+struct bind_job_op {
+ struct list_head entry;
+
+ enum vm_bind_op op;
+ u32 flags;
+
+ struct {
+ u64 addr;
+ u64 range;
+ } va;
+
+ struct {
+ u32 handle;
+ u64 offset;
+ struct drm_gem_object *obj;
+ } gem;
+
+ struct nouveau_uvma_region *reg;
+ struct nouveau_uvma_prealloc new;
+ struct drm_gpuva_ops *ops;
+};
+
+struct uvmm_map_args {
+ struct nouveau_uvma_region *region;
+ u64 addr;
+ u64 range;
+ u8 kind;
+};
+
+static int
+nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_sparse(vmm, addr, range, true);
+}
+
+static int
+nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_sparse(vmm, addr, range, false);
+}
+
+static int
+nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
+}
+
+static int
+nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
+}
+
+static int
+nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range, bool sparse)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+
+ return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
+}
+
+static int
+nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range,
+ u64 bo_offset, u8 kind,
+ struct nouveau_mem *mem)
+{
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+ union {
+ struct gf100_vmm_map_v0 gf100;
+ } args;
+ u32 argc = 0;
+
+ switch (vmm->object.oclass) {
+ case NVIF_CLASS_VMM_GF100:
+ case NVIF_CLASS_VMM_GM200:
+ case NVIF_CLASS_VMM_GP100:
+ args.gf100.version = 0;
+ if (mem->mem.type & NVIF_MEM_VRAM)
+ args.gf100.vol = 0;
+ else
+ args.gf100.vol = 1;
+ args.gf100.ro = 0;
+ args.gf100.priv = 0;
+ args.gf100.kind = kind;
+ argc = sizeof(args.gf100);
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
+ &args, argc,
+ &mem->mem, bo_offset);
+}
+
+static int
+nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg)
+{
+ u64 addr = reg->va.addr;
+ u64 range = reg->va.range;
+
+ return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
+}
+
+static int
+nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
+{
+ u64 addr = uvma->va.va.addr;
+ u64 range = uvma->va.va.range;
+
+ return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
+}
+
+static int
+nouveau_uvma_map(struct nouveau_uvma *uvma,
+ struct nouveau_mem *mem)
+{
+ u64 addr = uvma->va.va.addr;
+ u64 offset = uvma->va.gem.offset;
+ u64 range = uvma->va.va.range;
+
+ return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
+ offset, uvma->kind, mem);
+}
+
+static int
+nouveau_uvma_unmap(struct nouveau_uvma *uvma)
+{
+ u64 addr = uvma->va.va.addr;
+ u64 range = uvma->va.va.range;
+ bool sparse = !!uvma->region;
+
+ if (drm_gpuva_invalidated(&uvma->va))
+ return 0;
+
+ return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+}
+
+static int
+nouveau_uvma_alloc(struct nouveau_uvma **puvma)
+{
+ *puvma = kzalloc(sizeof(**puvma), GFP_KERNEL);
+ if (!*puvma)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+nouveau_uvma_free(struct nouveau_uvma *uvma)
+{
+ kfree(uvma);
+}
+
+static void
+nouveau_uvma_gem_get(struct nouveau_uvma *uvma)
+{
+ drm_gem_object_get(uvma->va.gem.obj);
+}
+
+static void
+nouveau_uvma_gem_put(struct nouveau_uvma *uvma)
+{
+ drm_gem_object_put(uvma->va.gem.obj);
+}
+
+static int
+nouveau_uvma_region_alloc(struct nouveau_uvma_region **preg)
+{
+ *preg = kzalloc(sizeof(**preg), GFP_KERNEL);
+ if (!*preg)
+ return -ENOMEM;
+
+ kref_init(&(*preg)->kref);
+
+ return 0;
+}
+
+static void
+nouveau_uvma_region_free(struct kref *kref)
+{
+ struct nouveau_uvma_region *reg =
+ container_of(kref, struct nouveau_uvma_region, kref);
+
+ kfree(reg);
+}
+
+static void
+nouveau_uvma_region_get(struct nouveau_uvma_region *reg)
+{
+ kref_get(&reg->kref);
+}
+
+static void
+nouveau_uvma_region_put(struct nouveau_uvma_region *reg)
+{
+ kref_put(&reg->kref, nouveau_uvma_region_free);
+}
+
+static int
+__nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_region *reg)
+{
+ u64 addr = reg->va.addr;
+ u64 range = reg->va.range;
+ u64 last = addr + range - 1;
+ MA_STATE(mas, &uvmm->region_mt, addr, addr);
+
+ if (unlikely(mas_walk(&mas)))
+ return -EEXIST;
+
+ if (unlikely(mas.last < last))
+ return -EEXIST;
+
+ mas.index = addr;
+ mas.last = last;
+
+ mas_store_gfp(&mas, reg, GFP_KERNEL);
+
+ reg->uvmm = uvmm;
+
+ return 0;
+}
+
+static int
+nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_region *reg,
+ u64 addr, u64 range)
+{
+ int ret;
+
+ reg->uvmm = uvmm;
+ reg->va.addr = addr;
+ reg->va.range = range;
+
+ ret = __nouveau_uvma_region_insert(uvmm, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+nouveau_uvma_region_remove(struct nouveau_uvma_region *reg)
+{
+ struct nouveau_uvmm *uvmm = reg->uvmm;
+ MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0);
+
+ mas_erase(&mas);
+}
+
+static int
+nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma_region *reg;
+ int ret;
+
+ if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
+ return -ENOSPC;
+
+ ret = nouveau_uvma_region_alloc(&reg);
+ if (ret)
+ return ret;
+
+ ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
+ if (ret)
+ goto err_free_region;
+
+ ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
+ if (ret)
+ goto err_region_remove;
+
+ return 0;
+
+err_region_remove:
+ nouveau_uvma_region_remove(reg);
+err_free_region:
+ nouveau_uvma_region_put(reg);
+ return ret;
+}
+
+static struct nouveau_uvma_region *
+nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ MA_STATE(mas, &uvmm->region_mt, addr, 0);
+
+ return mas_find(&mas, addr + range - 1);
+}
+
+static struct nouveau_uvma_region *
+nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma_region *reg;
+
+ reg = nouveau_uvma_region_find_first(uvmm, addr, range);
+ if (!reg)
+ return NULL;
+
+ if (reg->va.addr != addr ||
+ reg->va.range != range)
+ return NULL;
+
+ return reg;
+}
+
+static bool
+nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
+{
+ struct nouveau_uvmm *uvmm = reg->uvmm;
+
+ return drm_gpuva_interval_empty(&uvmm->umgr,
+ reg->va.addr,
+ reg->va.range);
+}
+
+static int
+__nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg)
+{
+ struct nouveau_uvmm *uvmm = reg->uvmm;
+ u64 addr = reg->va.addr;
+ u64 range = reg->va.range;
+
+ if (!nouveau_uvma_region_empty(reg))
+ return -EBUSY;
+
+ nouveau_uvma_region_remove(reg);
+ nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
+ nouveau_uvma_region_put(reg);
+
+ return 0;
+}
+
+static int
+nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma_region *reg;
+
+ reg = nouveau_uvma_region_find(uvmm, addr, range);
+ if (!reg)
+ return -ENOENT;
+
+ return __nouveau_uvma_region_destroy(reg);
+}
+
+static void
+nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg)
+{
+
+ init_completion(&reg->complete);
+ reg->dirty = true;
+}
+
+static void
+nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
+{
+ complete_all(&reg->complete);
+}
+
+static void
+op_map_prepare_unwind(struct nouveau_uvma *uvma)
+{
+ nouveau_uvma_gem_put(uvma);
+ drm_gpuva_remove(&uvma->va);
+ nouveau_uvma_free(uvma);
+}
+
+static void
+op_unmap_prepare_unwind(struct drm_gpuva *va)
+{
+ drm_gpuva_insert(va->mgr, va);
+}
+
+static void
+nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops,
+ struct drm_gpuva_op *last,
+ struct uvmm_map_args *args)
+{
+ struct drm_gpuva_op *op = last;
+ u64 vmm_get_start = args ? args->addr : 0;
+ u64 vmm_get_end = args ? args->addr + args->range : 0;
+
+ /* Unwind GPUVA space. */
+ drm_gpuva_for_each_op_from_reverse(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ op_map_prepare_unwind(new->map);
+ break;
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+
+ if (r->next)
+ op_map_prepare_unwind(new->next);
+
+ if (r->prev)
+ op_map_prepare_unwind(new->prev);
+
+ op_unmap_prepare_unwind(r->unmap->va);
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ op_unmap_prepare_unwind(op->unmap.va);
+ break;
+ default:
+ break;
+ }
+ }
+
+	/* Unmap operations don't allocate page tables, hence skip the following
+ * page table unwind.
+ */
+ if (!args)
+ return;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP: {
+ u64 vmm_get_range = vmm_get_end - vmm_get_start;
+
+ if (vmm_get_range)
+ nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
+ vmm_get_range);
+ break;
+ }
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ if (r->prev)
+ vmm_get_start = uend;
+
+ if (r->next)
+ vmm_get_end = ustart;
+
+ if (r->prev && r->next)
+ vmm_get_start = vmm_get_end = 0;
+
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP: {
+ struct drm_gpuva_op_unmap *u = &op->unmap;
+ struct drm_gpuva *va = u->va;
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ /* Nothing to do for mappings we merge with. */
+ if (uend == vmm_get_start ||
+ ustart == vmm_get_end)
+ break;
+
+ if (ustart > vmm_get_start) {
+ u64 vmm_get_range = ustart - vmm_get_start;
+
+ nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
+ vmm_get_range);
+ }
+ vmm_get_start = uend;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (op == last)
+ break;
+ }
+}
+
+static void
+nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops,
+ u64 addr, u64 range)
+{
+ struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
+ struct uvmm_map_args args = {
+ .addr = addr,
+ .range = range,
+ };
+
+ nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
+}
+
+static void
+nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *last = drm_gpuva_last_op(ops);
+
+ nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL);
+}
+
+static int
+op_map_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma **puvma,
+ struct drm_gpuva_op_map *op,
+ struct uvmm_map_args *args)
+{
+ struct nouveau_uvma *uvma;
+ int ret;
+
+ ret = nouveau_uvma_alloc(&uvma);
+ if (ret)
+ return ret;
+
+ uvma->region = args->region;
+ uvma->kind = args->kind;
+
+ drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+
+ /* Keep a reference until this uvma is destroyed. */
+ nouveau_uvma_gem_get(uvma);
+
+ *puvma = uvma;
+ return 0;
+}
+
+static void
+op_unmap_prepare(struct drm_gpuva_op_unmap *u)
+{
+ drm_gpuva_unmap(u);
+}
+
+static int
+nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops,
+ struct uvmm_map_args *args)
+{
+ struct drm_gpuva_op *op;
+ u64 vmm_get_start = args ? args->addr : 0;
+ u64 vmm_get_end = args ? args->addr + args->range : 0;
+ int ret;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP: {
+ u64 vmm_get_range = vmm_get_end - vmm_get_start;
+
+ ret = op_map_prepare(uvmm, &new->map, &op->map, args);
+ if (ret)
+ goto unwind;
+
+ if (args && vmm_get_range) {
+ ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
+ vmm_get_range);
+ if (ret) {
+ op_map_prepare_unwind(new->map);
+ goto unwind;
+ }
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
+ struct uvmm_map_args remap_args = {
+ .kind = uvma_from_va(va)->kind,
+ };
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ op_unmap_prepare(r->unmap);
+
+ if (r->prev) {
+ ret = op_map_prepare(uvmm, &new->prev, r->prev,
+ &remap_args);
+ if (ret)
+ goto unwind;
+
+ if (args)
+ vmm_get_start = uend;
+ }
+
+ if (r->next) {
+ ret = op_map_prepare(uvmm, &new->next, r->next,
+ &remap_args);
+ if (ret) {
+ if (r->prev)
+ op_map_prepare_unwind(new->prev);
+ goto unwind;
+ }
+
+ if (args)
+ vmm_get_end = ustart;
+ }
+
+ if (args && (r->prev && r->next))
+ vmm_get_start = vmm_get_end = 0;
+
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP: {
+ struct drm_gpuva_op_unmap *u = &op->unmap;
+ struct drm_gpuva *va = u->va;
+ u64 ustart = va->va.addr;
+ u64 urange = va->va.range;
+ u64 uend = ustart + urange;
+
+ op_unmap_prepare(u);
+
+ if (!args)
+ break;
+
+ /* Nothing to do for mappings we merge with. */
+ if (uend == vmm_get_start ||
+ ustart == vmm_get_end)
+ break;
+
+ if (ustart > vmm_get_start) {
+ u64 vmm_get_range = ustart - vmm_get_start;
+
+ ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
+ vmm_get_range);
+ if (ret) {
+ op_unmap_prepare_unwind(va);
+ goto unwind;
+ }
+ }
+ vmm_get_start = uend;
+
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ goto unwind;
+ }
+ }
+
+ return 0;
+
+unwind:
+ if (op != drm_gpuva_first_op(ops))
+ nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops,
+ drm_gpuva_prev_op(op),
+ args);
+ return ret;
+}
+
+static int
+nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct nouveau_uvma_region *region,
+ struct drm_gpuva_ops *ops,
+ u64 addr, u64 range, u8 kind)
+{
+ struct uvmm_map_args args = {
+ .region = region,
+ .addr = addr,
+ .range = range,
+ .kind = kind,
+ };
+
+ return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
+}
+
+static int
+nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
+}
+
+static struct drm_gem_object *
+op_gem_obj(struct drm_gpuva_op *op)
+{
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ return op->map.gem.obj;
+ case DRM_GPUVA_OP_REMAP:
+ /* Actually, we're looking for the GEMs backing remap.prev and
+ * remap.next, but since this is a remap they're identical to
+ * the GEM backing the unmapped GPUVA.
+ */
+ return op->remap.unmap->va->gem.obj;
+ case DRM_GPUVA_OP_UNMAP:
+ return op->unmap.va->gem.obj;
+ default:
+ WARN(1, "Unknown operation.\n");
+ return NULL;
+ }
+}
+
+static void
+op_map(struct nouveau_uvma *uvma)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj);
+
+ nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource));
+}
+
+static void
+op_unmap(struct drm_gpuva_op_unmap *u)
+{
+ struct drm_gpuva *va = u->va;
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ /* nouveau_uvma_unmap() does not unmap if the backing BO is evicted. */
+ if (!u->keep)
+ nouveau_uvma_unmap(uvma);
+}
+
+static void
+op_unmap_range(struct drm_gpuva_op_unmap *u,
+ u64 addr, u64 range)
+{
+ struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ bool sparse = !!uvma->region;
+
+ if (!drm_gpuva_invalidated(u->va))
+ nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+}
+
+static void
+op_remap(struct drm_gpuva_op_remap *r,
+ struct nouveau_uvma_prealloc *new)
+{
+ struct drm_gpuva_op_unmap *u = r->unmap;
+ struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ u64 addr = uvma->va.va.addr;
+ u64 range = uvma->va.va.range;
+
+ if (r->prev)
+ addr = r->prev->va.addr + r->prev->va.range;
+
+ if (r->next)
+ range = r->next->va.addr - addr;
+
+ op_unmap_range(u, addr, range);
+}
+
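+/* Run stage of a bind job: walk the GPUVA operations and perform the actual
+ * VMM map/unmap for each one, using the state pre-allocated in the prepare
+ * stage.
+ */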
+static int
+nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *op;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ op_map(new->map);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ op_remap(&op->remap, new);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ op_unmap(&op->unmap);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ return nouveau_uvmm_sm(uvmm, new, ops);
+}
+
+static int
+nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ return nouveau_uvmm_sm(uvmm, new, ops);
+}
+
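+/* Cleanup stage: drop the GEM references of and free the UVMAs replaced or
+ * removed by this job; on the unmap path also release the corresponding VMM
+ * allocations.
+ */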
+static void
+nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops, bool unmap)
+{
+ struct drm_gpuva_op *op;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ break;
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva_op_map *p = r->prev;
+ struct drm_gpuva_op_map *n = r->next;
+ struct drm_gpuva *va = r->unmap->va;
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ if (unmap) {
+ u64 addr = va->va.addr;
+ u64 end = addr + va->va.range;
+
+ if (p)
+ addr = p->va.addr + p->va.range;
+
+ if (n)
+ end = n->va.addr;
+
+ nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
+ }
+
+ nouveau_uvma_gem_put(uvma);
+ nouveau_uvma_free(uvma);
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP: {
+ struct drm_gpuva_op_unmap *u = &op->unmap;
+ struct drm_gpuva *va = u->va;
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ if (unmap)
+ nouveau_uvma_vmm_put(uvma);
+
+ nouveau_uvma_gem_put(uvma);
+ nouveau_uvma_free(uvma);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+static void
+nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
+}
+
+static void
+nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
+ struct nouveau_uvma_prealloc *new,
+ struct drm_gpuva_ops *ops)
+{
+ nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
+}
+
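+/* Sanity check a userspace provided VA range: page alignment, no wrap-around,
+ * within the overall VA space and not overlapping the kernel managed area.
+ */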
+static int
+nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
+{
+ u64 end = addr + range;
+ u64 kernel_managed_end = uvmm->kernel_managed_addr +
+ uvmm->kernel_managed_size;
+
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (range & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (end <= addr)
+ return -EINVAL;
+
+ if (addr < NOUVEAU_VA_SPACE_START ||
+ end > NOUVEAU_VA_SPACE_END)
+ return -EINVAL;
+
+ if (addr < kernel_managed_end &&
+ end > uvmm->kernel_managed_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_alloc(struct nouveau_uvmm_bind_job **pjob)
+{
+ *pjob = kzalloc(sizeof(**pjob), GFP_KERNEL);
+ if (!*pjob)
+ return -ENOMEM;
+
+ kref_init(&(*pjob)->kref);
+
+ return 0;
+}
+
+static void
+nouveau_uvmm_bind_job_free(struct kref *kref)
+{
+ struct nouveau_uvmm_bind_job *job =
+ container_of(kref, struct nouveau_uvmm_bind_job, kref);
+
+ nouveau_job_free(&job->base);
+ kfree(job);
+}
+
+static void
+nouveau_uvmm_bind_job_get(struct nouveau_uvmm_bind_job *job)
+{
+ kref_get(&job->kref);
+}
+
+static void
+nouveau_uvmm_bind_job_put(struct nouveau_uvmm_bind_job *job)
+{
+ kref_put(&job->kref, nouveau_uvmm_bind_job_free);
+}
+
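+/* Per-op sanity checks: for map ops the GEM offset must be page aligned and
+ * the requested range must fit within the backing object; the VA range itself
+ * is validated for every op.
+ */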
+static int
+bind_validate_op(struct nouveau_job *job,
+ struct bind_job_op *op)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct drm_gem_object *obj = op->gem.obj;
+
+ if (op->op == OP_MAP) {
+ if (op->gem.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (obj->size <= op->gem.offset)
+ return -EINVAL;
+
+ if (op->va.range > (obj->size - op->gem.offset))
+ return -EINVAL;
+ }
+
+ return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
+}
+
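+/* Before creating a sparse mapping, wait for any in-flight bind job that
+ * unmaps an overlapping range; the scan restarts after each wait since the
+ * job list may have changed in the meantime.
+ */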
+static void
+bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
+{
+ struct nouveau_uvmm_bind_job *bind_job;
+ struct nouveau_sched_entity *entity = job->entity;
+ struct bind_job_op *op;
+ u64 end = addr + range;
+
+again:
+ spin_lock(&entity->job.list.lock);
+ list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ list_for_each_op(op, &bind_job->ops) {
+ if (op->op == OP_UNMAP) {
+ u64 op_addr = op->va.addr;
+ u64 op_end = op_addr + op->va.range;
+
+ if (!(end <= op_addr || addr >= op_end)) {
+ nouveau_uvmm_bind_job_get(bind_job);
+ spin_unlock(&entity->job.list.lock);
+ wait_for_completion(&bind_job->complete);
+ nouveau_uvmm_bind_job_put(bind_job);
+ goto again;
+ }
+ }
+ }
+ }
+ spin_unlock(&entity->job.list.lock);
+}
+
+static int
+bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range,
+ bool sparse)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct nouveau_uvma_region *reg;
+ u64 reg_addr, reg_end;
+ u64 end = addr + range;
+
+again:
+ nouveau_uvmm_lock(uvmm);
+ reg = nouveau_uvma_region_find_first(uvmm, addr, range);
+ if (!reg) {
+ nouveau_uvmm_unlock(uvmm);
+ return 0;
+ }
+
+ /* Generally, job submits are serialized, hence only
+ * dirty regions can be modified concurrently.
+ */
+ if (reg->dirty) {
+ nouveau_uvma_region_get(reg);
+ nouveau_uvmm_unlock(uvmm);
+ wait_for_completion(&reg->complete);
+ nouveau_uvma_region_put(reg);
+ goto again;
+ }
+ nouveau_uvmm_unlock(uvmm);
+
+ if (sparse)
+ return -ENOSPC;
+
+ reg_addr = reg->va.addr;
+ reg_end = reg_addr + reg->va.range;
+
+ /* Make sure the mapping is either outside of a
+ * region or fully enclosed by a region.
+ */
+ if (reg_addr > addr || reg_end < end)
+ return -ENOSPC;
+
+ return 0;
+}
+
+static int
+bind_validate_region(struct nouveau_job *job)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ u64 op_addr = op->va.addr;
+ u64 op_range = op->va.range;
+ bool sparse = false;
+
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ sparse = true;
+ bind_validate_map_sparse(job, op_addr, op_range);
+ fallthrough;
+ case OP_MAP:
+ ret = bind_validate_map_common(job, op_addr, op_range,
+ sparse);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
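+/* Link newly created GPUVAs to, and unlink replaced or removed GPUVAs from,
+ * their backing GEM objects.
+ */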
+static void
+bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+{
+ struct drm_gpuva_op *op;
+
+ drm_gpuva_for_each_op(op, ops) {
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ drm_gpuva_link(&new->map->va);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ drm_gpuva_link(&new->prev->va);
+ if (op->remap.next)
+ drm_gpuva_link(&new->next->va);
+ drm_gpuva_unlink(op->remap.unmap->va);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ drm_gpuva_unlink(op->unmap.va);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
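+/* Submit stage of a bind job: validate the ops, create and prepare the GPUVA
+ * operations under the uvmm lock, lock and validate the backing GEMs and
+ * finally link/unlink the affected GPUVAs.
+ */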
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct nouveau_sched_entity *entity = job->entity;
+ struct drm_exec *exec = &job->exec;
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ if (op->op == OP_MAP) {
+ op->gem.obj = drm_gem_object_lookup(job->file_priv,
+ op->gem.handle);
+ if (!op->gem.obj)
+ return -ENOENT;
+ }
+
+ ret = bind_validate_op(job, op);
+ if (ret)
+ return ret;
+ }
+
+ /* If a sparse region or mapping overlaps a dirty region, we need to
+ * wait for the region to complete the unbind process. This is due to
+ * how page table management is currently implemented. A future
+ * implementation might change this.
+ */
+ ret = bind_validate_region(job);
+ if (ret)
+ return ret;
+
+ /* Once we start modifying the GPU VA space we need to keep holding the
+ * uvmm lock until we can't fail anymore. This is because the set of GPU
+ * VA space changes must appear atomic and we need to be able to
+ * unwind all GPU VA space changes on failure.
+ */
+ nouveau_uvmm_lock(uvmm);
+ list_for_each_op(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ ret = nouveau_uvma_region_create(uvmm,
+ op->va.addr,
+ op->va.range);
+ if (ret)
+ goto unwind_continue;
+
+ break;
+ case OP_UNMAP_SPARSE:
+ op->reg = nouveau_uvma_region_find(uvmm, op->va.addr,
+ op->va.range);
+ if (!op->reg || op->reg->dirty) {
+ ret = -ENOENT;
+ goto unwind_continue;
+ }
+
+ op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->va.addr,
+ op->va.range);
+ if (IS_ERR(op->ops)) {
+ ret = PTR_ERR(op->ops);
+ goto unwind_continue;
+ }
+
+ ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
+ op->ops);
+ if (ret) {
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ op->reg = NULL;
+ goto unwind_continue;
+ }
+
+ nouveau_uvma_region_dirty(op->reg);
+
+ break;
+ case OP_MAP: {
+ struct nouveau_uvma_region *reg;
+
+ reg = nouveau_uvma_region_find_first(uvmm,
+ op->va.addr,
+ op->va.range);
+ if (reg) {
+ u64 reg_addr = reg->va.addr;
+ u64 reg_end = reg_addr + reg->va.range;
+ u64 op_addr = op->va.addr;
+ u64 op_end = op_addr + op->va.range;
+
+ if (unlikely(reg->dirty)) {
+ ret = -EINVAL;
+ goto unwind_continue;
+ }
+
+ /* Make sure the mapping is either outside of a
+ * region or fully enclosed by a region.
+ */
+ if (reg_addr > op_addr || reg_end < op_end) {
+ ret = -ENOSPC;
+ goto unwind_continue;
+ }
+ }
+
+ op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
+ op->va.addr,
+ op->va.range,
+ op->gem.obj,
+ op->gem.offset);
+ if (IS_ERR(op->ops)) {
+ ret = PTR_ERR(op->ops);
+ goto unwind_continue;
+ }
+
+ ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new,
+ reg, op->ops,
+ op->va.addr,
+ op->va.range,
+ op->flags & 0xff);
+ if (ret) {
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ goto unwind_continue;
+ }
+
+ break;
+ }
+ case OP_UNMAP:
+ op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->va.addr,
+ op->va.range);
+ if (IS_ERR(op->ops)) {
+ ret = PTR_ERR(op->ops);
+ goto unwind_continue;
+ }
+
+ ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
+ op->ops);
+ if (ret) {
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ goto unwind_continue;
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ goto unwind_continue;
+ }
+ }
+
+ drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(exec) {
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (IS_ERR_OR_NULL(op->ops))
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, obj, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
+ }
+ }
+ }
+ }
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (IS_ERR_OR_NULL(op->ops))
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ /* Don't validate GEMs backing mappings we're about to
+ * unmap; it's not worth the effort.
+ */
+ if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
+ continue;
+
+ ret = nouveau_bo_validate(nouveau_gem_object(obj),
+ true, false);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
+ }
+ }
+ }
+
+ /* Link and unlink GPUVAs while holding the dma_resv lock.
+ *
+ * As long as we validate() all GEMs and add fences to all GEMs' DMA
+ * reservations backing map and remap operations we can be sure there
+ * won't be any concurrent (in)validations during job execution, hence
+ * we're safe to check drm_gpuva_invalidated() within the fence
+ * signalling critical path without holding a separate lock.
+ *
+ * GPUVAs about to be unmapped are safe as well, since they're unlinked
+ * already.
+ *
+ * GEMs from map and remap operations must be validated before linking
+ * their corresponding mappings to prevent the actual PT update from
+ * happening right away in validate() rather than asynchronously as
+ * intended.
+ *
+ * Note that after linking and unlinking the GPUVAs in this loop this
+ * function cannot fail anymore, hence there is no need for an unwind
+ * path.
+ */
+ list_for_each_op(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_UNMAP_SPARSE:
+ case OP_MAP:
+ case OP_UNMAP:
+ bind_link_gpuvas(op->ops, &op->new);
+ break;
+ default:
+ break;
+ }
+ }
+ nouveau_uvmm_unlock(uvmm);
+
+ spin_lock(&entity->job.list.lock);
+ list_add(&bind_job->entry, &entity->job.list.head);
+ spin_unlock(&entity->job.list.lock);
+
+ return 0;
+
+unwind_continue:
+ op = list_prev_op(op);
+unwind:
+ list_for_each_op_from_reverse(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ nouveau_uvma_region_destroy(uvmm, op->va.addr,
+ op->va.range);
+ break;
+ case OP_UNMAP_SPARSE:
+ __nouveau_uvma_region_insert(uvmm, op->reg);
+ nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
+ op->ops);
+ break;
+ case OP_MAP:
+ nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new,
+ op->ops,
+ op->va.addr,
+ op->va.range);
+ break;
+ case OP_UNMAP:
+ nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
+ op->ops);
+ break;
+ }
+
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ op->ops = NULL;
+ op->reg = NULL;
+ }
+
+ nouveau_uvmm_unlock(uvmm);
+ drm_exec_fini(exec);
+ return ret;
+}
+
+static void
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+{
+ struct drm_exec *exec = &job->exec;
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj)
+ dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
+
+ drm_exec_fini(exec);
+}
+
+static struct dma_fence *
+nouveau_uvmm_bind_job_run(struct nouveau_job *job)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct bind_job_op *op;
+ int ret = 0;
+
+ list_for_each_op(op, &bind_job->ops) {
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ /* noop */
+ break;
+ case OP_MAP:
+ ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
+ if (ret)
+ goto out;
+ break;
+ case OP_UNMAP_SPARSE:
+ fallthrough;
+ case OP_UNMAP:
+ ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
+ if (ret)
+ goto out;
+ break;
+ }
+ }
+
+out:
+ if (ret)
+ NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static void
+nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+{
+ struct nouveau_uvmm_bind_job *bind_job =
+ container_of(work, struct nouveau_uvmm_bind_job, work);
+ struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
+ struct nouveau_sched_entity *entity = job->entity;
+ struct bind_job_op *op, *next;
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gem_object *obj = op->gem.obj;
+
+ /* When nouveau_uvmm_bind_job_submit() fails, op->ops and op->reg
+ * will be NULL, hence skip the cleanup.
+ */
+ switch (op->op) {
+ case OP_MAP_SPARSE:
+ /* noop */
+ break;
+ case OP_UNMAP_SPARSE:
+ if (!IS_ERR_OR_NULL(op->ops))
+ nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
+ op->ops);
+
+ if (op->reg) {
+ nouveau_uvma_region_sparse_unref(op->reg);
+ nouveau_uvmm_lock(uvmm);
+ nouveau_uvma_region_remove(op->reg);
+ nouveau_uvmm_unlock(uvmm);
+ nouveau_uvma_region_complete(op->reg);
+ nouveau_uvma_region_put(op->reg);
+ }
+
+ break;
+ case OP_MAP:
+ if (!IS_ERR_OR_NULL(op->ops))
+ nouveau_uvmm_sm_map_cleanup(uvmm, &op->new,
+ op->ops);
+ break;
+ case OP_UNMAP:
+ if (!IS_ERR_OR_NULL(op->ops))
+ nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
+ op->ops);
+ break;
+ }
+
+ if (!IS_ERR_OR_NULL(op->ops))
+ drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+
+ if (obj)
+ drm_gem_object_put(obj);
+ }
+
+ spin_lock(&entity->job.list.lock);
+ list_del(&bind_job->entry);
+ spin_unlock(&entity->job.list.lock);
+
+ complete_all(&bind_job->complete);
+ wake_up(&entity->job.wq);
+
+ /* Remove and free ops after removing the bind job from the job list to
+ * avoid races against bind_validate_map_sparse().
+ */
+ list_for_each_op_safe(op, next, &bind_job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
+
+ nouveau_uvmm_bind_job_put(bind_job);
+}
+
+static void
+nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct nouveau_sched_entity *entity = job->entity;
+
+ nouveau_sched_entity_qwork(entity, &bind_job->work);
+}
+
+static struct nouveau_job_ops nouveau_bind_job_ops = {
+ .submit = nouveau_uvmm_bind_job_submit,
+ .armed_submit = nouveau_uvmm_bind_job_armed_submit,
+ .run = nouveau_uvmm_bind_job_run,
+ .free = nouveau_uvmm_bind_job_free_qwork,
+};
+
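+/* Translate a userspace bind op into an internal bind_job_op; the SPARSE flag
+ * selects the sparse variants of map and unmap.
+ */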
+static int
+bind_job_op_from_uop(struct bind_job_op **pop,
+ struct drm_nouveau_vm_bind_op *uop)
+{
+ struct bind_job_op *op;
+
+ op = *pop = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ switch (uop->op) {
+ case OP_MAP:
+ op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
+ OP_MAP_SPARSE : OP_MAP;
+ break;
+ case OP_UNMAP:
+ op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
+ OP_UNMAP_SPARSE : OP_UNMAP;
+ break;
+ default:
+ op->op = uop->op;
+ break;
+ }
+
+ op->flags = uop->flags;
+ op->va.addr = uop->addr;
+ op->va.range = uop->range;
+ op->gem.handle = uop->handle;
+ op->gem.offset = uop->bo_offset;
+
+ return 0;
+}
+
+static void
+bind_job_ops_free(struct list_head *ops)
+{
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
+}
+
+static int
+nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
+ struct nouveau_uvmm_bind_job_args *__args)
+{
+ struct nouveau_uvmm_bind_job *job;
+ struct nouveau_job_args args = {};
+ struct bind_job_op *op;
+ int i, ret;
+
+ ret = nouveau_uvmm_bind_job_alloc(&job);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&job->ops);
+ INIT_LIST_HEAD(&job->entry);
+
+ for (i = 0; i < __args->op.count; i++) {
+ ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
+ if (ret)
+ goto err_free;
+
+ list_add_tail(&op->entry, &job->ops);
+ }
+
+ init_completion(&job->complete);
+ INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
+
+ args.sched_entity = __args->sched_entity;
+ args.file_priv = __args->file_priv;
+
+ args.in_sync.count = __args->in_sync.count;
+ args.in_sync.s = __args->in_sync.s;
+
+ args.out_sync.count = __args->out_sync.count;
+ args.out_sync.s = __args->out_sync.s;
+
+ args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC);
+ args.ops = &nouveau_bind_job_ops;
+ args.resv_usage = DMA_RESV_USAGE_BOOKKEEP;
+
+ ret = nouveau_job_init(&job->base, &args);
+ if (ret)
+ goto err_free;
+
+ *pjob = job;
+ return 0;
+
+err_free:
+ bind_job_ops_free(&job->ops);
+ kfree(job);
+ *pjob = NULL;
+
+ return ret;
+}
+
+int
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_nouveau_vm_init *init = data;
+
+ return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
+ init->kernel_managed_size);
+}
+
+static int
+nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
+{
+ struct nouveau_uvmm_bind_job *job;
+ int ret;
+
+ ret = nouveau_uvmm_bind_job_init(&job, args);
+ if (ret)
+ return ret;
+
+ ret = nouveau_job_submit(&job->base);
+ if (ret)
+ goto err_job_fini;
+
+ return 0;
+
+err_job_fini:
+ nouveau_job_fini(&job->base);
+ return ret;
+}
+
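+/* Copy the op, wait and signal sync arrays from userspace. */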
+static int
+nouveau_uvmm_vm_bind_ucopy(struct nouveau_uvmm_bind_job_args *args,
+ struct drm_nouveau_vm_bind *req)
+{
+ struct drm_nouveau_sync **s;
+ u32 inc = req->wait_count;
+ u64 ins = req->wait_ptr;
+ u32 outc = req->sig_count;
+ u64 outs = req->sig_ptr;
+ u32 opc = req->op_count;
+ u64 ops = req->op_ptr;
+ int ret;
+
+ args->flags = req->flags;
+
+ if (opc) {
+ args->op.count = opc;
+ args->op.s = u_memcpya(ops, opc,
+ sizeof(*args->op.s));
+ if (IS_ERR(args->op.s))
+ return PTR_ERR(args->op.s);
+ }
+
+ if (inc) {
+ s = &args->in_sync.s;
+
+ args->in_sync.count = inc;
+ *s = u_memcpya(ins, inc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_ops;
+ }
+ }
+
+ if (outc) {
+ s = &args->out_sync.s;
+
+ args->out_sync.count = outc;
+ *s = u_memcpya(outs, outc, sizeof(**s));
+ if (IS_ERR(*s)) {
+ ret = PTR_ERR(*s);
+ goto err_free_ins;
+ }
+ }
+
+ return 0;
+
+err_free_ops:
+ u_free(args->op.s);
+err_free_ins:
+ u_free(args->in_sync.s);
+ return ret;
+}
+
+static void
+nouveau_uvmm_vm_bind_ufree(struct nouveau_uvmm_bind_job_args *args)
+{
+ u_free(args->op.s);
+ u_free(args->in_sync.s);
+ u_free(args->out_sync.s);
+}
+
+int
+nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_uvmm_bind_job_args args = {};
+ struct drm_nouveau_vm_bind *req = data;
+ int ret = 0;
+
+ if (unlikely(!nouveau_cli_uvmm_locked(cli)))
+ return -ENOSYS;
+
+ ret = nouveau_uvmm_vm_bind_ucopy(&args, req);
+ if (ret)
+ return ret;
+
+ args.sched_entity = &cli->sched_entity;
+ args.file_priv = file_priv;
+
+ ret = nouveau_uvmm_vm_bind(&args);
+ if (ret)
+ goto out_free_args;
+
+out_free_args:
+ nouveau_uvmm_vm_bind_ufree(&args);
+ return ret;
+}
+
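+/* Re-map all GPUVAs backed by this BO to the given memory and clear their
+ * invalidated flag (e.g. after the BO has been moved); the BO's dma_resv must
+ * be held.
+ */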
+void
+nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
+{
+ struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuva *va;
+
+ dma_resv_assert_held(obj->resv);
+
+ drm_gem_for_each_gpuva(va, obj) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ nouveau_uvma_map(uvma, mem);
+ drm_gpuva_invalidate(va, false);
+ }
+}
+
+void
+nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
+{
+ struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuva *va;
+
+ dma_resv_assert_held(obj->resv);
+
+ drm_gem_for_each_gpuva(va, obj) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+
+ nouveau_uvma_unmap(uvma);
+ drm_gpuva_invalidate(va, true);
+ }
+}
+
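+/* Set up a client's uvmm: region maple tree, GPUVA manager with a kernel
+ * managed node and a RAW nvif VMM.
+ */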
+int
+nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
+ u64 kernel_managed_addr, u64 kernel_managed_size)
+{
+ int ret;
+ u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
+
+ mutex_init(&uvmm->mutex);
+ dma_resv_init(&uvmm->resv);
+ mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+
+ mutex_lock(&cli->mutex);
+
+ if (unlikely(cli->uvmm.disabled)) {
+ ret = -ENOSYS;
+ goto out_unlock;
+ }
+
+ if (kernel_managed_end <= kernel_managed_addr) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ uvmm->kernel_managed_addr = kernel_managed_addr;
+ uvmm->kernel_managed_size = kernel_managed_size;
+
+ drm_gpuva_manager_init(&uvmm->umgr, cli->name,
+ NOUVEAU_VA_SPACE_START,
+ NOUVEAU_VA_SPACE_END,
+ kernel_managed_addr, kernel_managed_size,
+ NULL);
+
+ ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
+ cli->vmm.vmm.object.oclass, RAW,
+ kernel_managed_addr, kernel_managed_size,
+ NULL, 0, &cli->uvmm.vmm.vmm);
+ if (ret)
+ goto out_free_gpuva_mgr;
+
+ cli->uvmm.vmm.cli = cli;
+ mutex_unlock(&cli->mutex);
+
+ return 0;
+
+out_free_gpuva_mgr:
+ drm_gpuva_manager_destroy(&uvmm->umgr);
+out_unlock:
+ mutex_unlock(&cli->mutex);
+ return ret;
+}
+
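+/* Tear down a client's uvmm: wait for pending bind jobs, then unmap and free
+ * all remaining mappings and regions.
+ */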
+void
+nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
+{
+ MA_STATE(mas, &uvmm->region_mt, 0, 0);
+ struct nouveau_uvma_region *reg;
+ struct nouveau_cli *cli = uvmm->vmm.cli;
+ struct nouveau_sched_entity *entity = &cli->sched_entity;
+ struct drm_gpuva *va, *next;
+
+ if (!cli)
+ return;
+
+ rmb(); /* for list_empty to work without lock */
+ wait_event(entity->job.wq, list_empty(&entity->job.list.head));
+
+ nouveau_uvmm_lock(uvmm);
+ drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
+ struct drm_gem_object *obj = va->gem.obj;
+
+ if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ continue;
+
+ drm_gpuva_remove(va);
+
+ dma_resv_lock(obj->resv, NULL);
+ drm_gpuva_unlink(va);
+ dma_resv_unlock(obj->resv);
+
+ nouveau_uvma_unmap(uvma);
+ nouveau_uvma_vmm_put(uvma);
+
+ nouveau_uvma_gem_put(uvma);
+ nouveau_uvma_free(uvma);
+ }
+
+ mas_for_each(&mas, reg, ULONG_MAX) {
+ mas_erase(&mas);
+ nouveau_uvma_region_sparse_unref(reg);
+ nouveau_uvma_region_put(reg);
+ }
+
+ WARN(!mtree_empty(&uvmm->region_mt),
+ "nouveau_uvma_region tree not empty, potentially leaking memory.");
+ __mt_destroy(&uvmm->region_mt);
+ nouveau_uvmm_unlock(uvmm);
+
+ mutex_lock(&cli->mutex);
+ nouveau_vmm_fini(&uvmm->vmm);
+ drm_gpuva_manager_destroy(&uvmm->umgr);
+ mutex_unlock(&cli->mutex);
+
+ dma_resv_fini(&uvmm->resv);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
new file mode 100644
index 000000000000..fc7f6fd2a4e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOUVEAU_UVMM_H__
+#define __NOUVEAU_UVMM_H__
+
+#include <drm/drm_gpuva_mgr.h>
+
+#include "nouveau_drv.h"
+
+struct nouveau_uvmm {
+ struct nouveau_vmm vmm;
+ struct drm_gpuva_manager umgr;
+ struct maple_tree region_mt;
+ struct mutex mutex;
+ struct dma_resv resv;
+
+ u64 kernel_managed_addr;
+ u64 kernel_managed_size;
+
+ bool disabled;
+};
+
+struct nouveau_uvma_region {
+ struct nouveau_uvmm *uvmm;
+
+ struct {
+ u64 addr;
+ u64 range;
+ } va;
+
+ struct kref kref;
+
+ struct completion complete;
+ bool dirty;
+};
+
+struct nouveau_uvma {
+ struct drm_gpuva va;
+
+ struct nouveau_uvma_region *region;
+ u8 kind;
+};
+
+#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
+#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
+
+#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
+
+struct nouveau_uvmm_bind_job {
+ struct nouveau_job base;
+
+ struct kref kref;
+ struct list_head entry;
+ struct work_struct work;
+ struct completion complete;
+
+ /* struct bind_job_op */
+ struct list_head ops;
+};
+
+struct nouveau_uvmm_bind_job_args {
+ struct drm_file *file_priv;
+ struct nouveau_sched_entity *sched_entity;
+
+ unsigned int flags;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } in_sync;
+
+ struct {
+ struct drm_nouveau_sync *s;
+ u32 count;
+ } out_sync;
+
+ struct {
+ struct drm_nouveau_vm_bind_op *s;
+ u32 count;
+ } op;
+};
+
+#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
+
+int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
+ u64 kernel_managed_addr, u64 kernel_managed_size);
+void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
+
+void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem);
+void nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo);
+
+int nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+static inline void nouveau_uvmm_lock(struct nouveau_uvmm *uvmm)
+{
+ mutex_lock(&uvmm->mutex);
+}
+
+static inline void nouveau_uvmm_unlock(struct nouveau_uvmm *uvmm)
+{
+ mutex_unlock(&uvmm->mutex);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 67d6619fcd5e..a6602c012671 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
- int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
- 0, NULL, 0, &vmm->vmm);
+ int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
+ PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 3709cbbc19a1..c9dd3cff49a0 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -27,6 +27,9 @@
void
nvif_mmu_dtor(struct nvif_mmu *mmu)
{
+ if (!nvif_object_constructed(&mmu->object))
+ return;
+
kfree(mmu->kind);
kfree(mmu->type);
kfree(mmu->heap);
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 6053d6dc2184..99296f03371a 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -104,6 +104,90 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
return ret;
}
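+/* Raw VMM operations, issued via the NVIF_VMM_V0_RAW method: GET/PUT acquire
+ * and release page table backing for a VA range, MAP/UNMAP update the PTEs of
+ * a range and SPARSE toggles a range's sparse state. These are used by the
+ * new uvmm code.
+ */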
+int
+nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_GET,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_PUT,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+ void *argv, u32 argc, struct nvif_mem *mem, u64 offset)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_MAP,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ .memory = nvif_handle(&mem->object),
+ .offset = offset,
+ .argv = (u64)(uintptr_t)argv,
+ .argc = argc,
+ };
+
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift, bool sparse)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_UNMAP,
+ .addr = addr,
+ .size = size,
+ .shift = shift,
+ .sparse = sparse,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
+int
+nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ struct nvif_vmm_raw_v0 args = {
+ .version = 0,
+ .op = NVIF_VMM_RAW_V0_SPARSE,
+ .addr = addr,
+ .size = size,
+ .ref = ref,
+ };
+
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
+ &args, sizeof(args));
+}
+
void
nvif_vmm_dtor(struct nvif_vmm *vmm)
{
@@ -112,8 +196,9 @@ nvif_vmm_dtor(struct nvif_vmm *vmm)
}
int
-nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
- u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
+nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ enum nvif_vmm_type type, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
@@ -125,9 +210,18 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->version = 0;
- args->managed = managed;
args->addr = addr;
args->size = size;
+
+ switch (type) {
+ case UNMANAGED: args->type = NVIF_VMM_V0_TYPE_UNMANAGED; break;
+ case MANAGED: args->type = NVIF_VMM_V0_TYPE_MANAGED; break;
+ case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
memcpy(args->data, argv, argc);
ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/intr.c b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
index e20b7ca218c3..36a747f0039e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/intr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
@@ -212,8 +212,8 @@ nvkm_intr(int irq, void *arg)
list_for_each_entry(intr, &device->intr.intr, head) {
for (leaf = 0; leaf < intr->leaves; leaf++) {
if (intr->stat[leaf]) {
- nvkm_warn(intr->subdev, "intr%d: %08x\n",
- leaf, intr->stat[leaf]);
+ nvkm_debug(intr->subdev, "intr%d: %08x\n",
+ leaf, intr->stat[leaf]);
nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 301a5e5b5f7f..7c554c14e884 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -185,7 +185,7 @@ nvkm_object_fini(struct nvkm_object *object, bool suspend)
nvif_debug(object, "%s children...\n", action);
time = ktime_to_us(ktime_get());
- list_for_each_entry(child, &object->tree, head) {
+ list_for_each_entry_reverse(child, &object->tree, head) {
ret = nvkm_object_fini(child, suspend);
if (ret && suspend)
goto fail_child;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 6648ed62daa6..315a69f7fdd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -36,6 +36,15 @@ ga100_ce_intr(struct nvkm_inth *inth)
}
int
+ga100_ce_nonstall(struct nvkm_engine *engine)
+{
+ struct nvkm_subdev *subdev = &engine->subdev;
+ struct nvkm_device *device = subdev->device;
+
+ return nvkm_rd32(device, 0x104424 + (subdev->inst * 0x80)) & 0x00000fff;
+}
+
+int
ga100_ce_fini(struct nvkm_engine *engine, bool suspend)
{
nvkm_inth_block(&engine->subdev.inth);
@@ -67,6 +76,7 @@ ga100_ce = {
.oneinit = ga100_ce_oneinit,
.init = ga100_ce_init,
.fini = ga100_ce_fini,
+ .nonstall = ga100_ce_nonstall,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, AMPERE_DMA_COPY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index 9f3448ad625f..461b73c7e2e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -28,6 +28,7 @@ ga102_ce = {
.oneinit = ga100_ce_oneinit,
.init = ga100_ce_init,
.fini = ga100_ce_fini,
+ .nonstall = ga100_ce_nonstall,
.cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, AMPERE_DMA_COPY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index c4c046916fa6..0be72c463b21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -3,7 +3,7 @@
#define __NVKM_CE_PRIV_H__
#include <engine/ce.h>
-void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_chan *);
void gk104_ce_intr(struct nvkm_engine *);
void gp100_ce_intr(struct nvkm_engine *);
@@ -12,4 +12,5 @@ extern const struct nvkm_object_func gv100_ce_cclass;
int ga100_ce_oneinit(struct nvkm_engine *);
int ga100_ce_init(struct nvkm_engine *);
int ga100_ce_fini(struct nvkm_engine *, bool);
+int ga100_ce_nonstall(struct nvkm_engine *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 40c8ea43c42f..b8ac66b4a2c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -26,6 +26,8 @@
#include "head.h"
#include "ior.h"
+#include <drm/display/drm_dp.h>
+
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
@@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
return outp->dp.rates != 0;
}
+/* XXX: This is a big fat hack: it is just drm_dp_read_dpcd_caps()
+ * converted to work inside nvkm. This is a temporary holdover until we start
+ * passing the drm_dp_aux device through NVKM.
+ */
+static int
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
+{
+ struct nvkm_i2c_aux *aux = outp->dp.aux;
+ u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+ int ret;
+
+ ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Prior to DP1.3 the bit represented by
+ * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+ * If it is set, DP_DPCD_REV at 0000h could be at a value less than
+ * the true capability of the panel. The only way to check is to
+ * then compare 0000h and 2200h.
+ */
+ if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+ return 0;
+
+ ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
+ if (ret < 0)
+ return ret;
+
+ if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+ OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+ outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
+ return 0;
+ }
+
+ if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
+ return 0;
+
+ memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
+
+ return 0;
+}
+
void
nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
{
@@ -689,7 +735,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
}
- if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
+ if (!nvkm_dp_read_dpcd_caps(outp)) {
const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
const u8 *rate;
int rate_max;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index a4853c4e5ee3..67ef889a0c5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -295,6 +295,7 @@ g94_sor = {
.clock = nv50_sor_clock,
.war_2 = g94_sor_war_2,
.war_3 = g94_sor_war_3,
+ .hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index a2c7c6f83dcd..506ffbe7b842 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -125,7 +125,7 @@ gt215_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 siz
pack_hdmi_infoframe(&avi, data, size);
nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
- if (size)
+ if (!size)
return;
nvkm_wr32(device, 0x61c528 + soff, avi.header);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index dad942be6679..46b057fe1412 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -81,20 +81,29 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
return -ENOSYS;
list_for_each_entry(outp, &conn->disp->outps, head) {
- if (outp->info.connector == conn->index && outp->dp.aux) {
- if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_I2C_PLUG;
- if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
- if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ ) bits |= NVKM_I2C_IRQ;
+ if (outp->info.connector == conn->index)
+ break;
+ }
- return nvkm_uevent_add(uevent, &device->i2c->event, outp->dp.aux->id, bits,
- nvkm_uconn_uevent_aux);
- }
+ if (&outp->head == &conn->disp->outps)
+ return -EINVAL;
+
+ if (outp->dp.aux && !outp->info.location) {
+ if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_I2C_PLUG;
+ if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
+ if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ ) bits |= NVKM_I2C_IRQ;
+
+ return nvkm_uevent_add(uevent, &device->i2c->event, outp->dp.aux->id, bits,
+ nvkm_uconn_uevent_aux);
}
if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_GPIO_HI;
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
- if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ)
- return -EINVAL;
+ if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
+ /* TODO: support DP IRQ on ANX9805 and remove this hack. */
+ if (!outp->info.location)
+ return -EINVAL;
+ }
return nvkm_uevent_add(uevent, &device->gpio->event, conn->info.hpd, bits,
nvkm_uconn_uevent_gpio);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 5ea9a2ff0663..5db37247dc29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -283,11 +283,21 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
}
/* Initialise non-stall intr handling. */
- if (fifo->func->nonstall_ctor) {
- ret = fifo->func->nonstall_ctor(fifo);
- if (ret) {
- nvkm_error(subdev, "nonstall %d\n", ret);
+ if (fifo->func->nonstall) {
+ if (fifo->func->nonstall_ctor) {
+ ret = fifo->func->nonstall_ctor(fifo);
+ if (ret < 0) {
+ nvkm_error(subdev, "nonstall %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = 1;
}
+
+ ret = nvkm_event_init(fifo->func->nonstall, &fifo->engine.subdev, 1, ret,
+ &fifo->nonstall.event);
+ if (ret)
+ return ret;
}
/* Allocate USERD + BAR1 polling area. */
@@ -358,7 +368,6 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
struct nvkm_fifo *fifo;
- int ret;
if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
@@ -374,16 +383,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
- ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
- if (ret)
- return ret;
-
- if (func->nonstall) {
- ret = nvkm_event_init(func->nonstall, &fifo->engine.subdev, 1, 1,
- &fifo->nonstall.event);
- if (ret)
- return ret;
- }
-
- return 0;
+ return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
index 12a5d99d5e77..c56d2a839efb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
@@ -32,9 +32,6 @@
#include <nvif/class.h>
-/*TODO: allocate? */
-#define GA100_FIFO_NONSTALL_VECTOR 0
-
static u32
ga100_chan_doorbell_handle(struct nvkm_chan *chan)
{
@@ -83,7 +80,7 @@ ga100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->inst, 0x0e8, chan->id);
nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
- nvkm_wo32(chan->inst, 0x0f8, 0x80000000 | GA100_FIFO_NONSTALL_VECTOR);
+ nvkm_wo32(chan->inst, 0x0f8, 0x80000000 | chan->cgrp->runl->nonstall.vector);
nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
nvkm_done(chan->inst);
return 0;
@@ -148,8 +145,20 @@ ga100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
return -ENODEV;
}
+static int
+ga100_engn_nonstall(struct nvkm_engn *engn)
+{
+ struct nvkm_engine *engine = engn->engine;
+
+ if (WARN_ON(!engine->func->nonstall))
+ return -EINVAL;
+
+ return engine->func->nonstall(engine);
+}
+
const struct nvkm_engn_func
ga100_engn = {
+ .nonstall = ga100_engn_nonstall,
.cxid = ga100_engn_cxid,
.ctor = gk104_ectx_ctor,
.bind = gv100_ectx_bind,
@@ -157,6 +166,7 @@ ga100_engn = {
const struct nvkm_engn_func
ga100_engn_ce = {
+ .nonstall = ga100_engn_nonstall,
.cxid = ga100_engn_cxid,
.ctor = gv100_ectx_ce_ctor,
.bind = gv100_ectx_ce_bind,
@@ -429,7 +439,9 @@ static int
ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prunl)
{
struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_top_device *tdev;
struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
u32 chcfg = nvkm_rd32(device, addr + 0x004);
u32 chnum = 1 << (chcfg & 0x0000000f);
u32 chaddr = (chcfg & 0xfffffff0);
@@ -437,26 +449,55 @@ ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prun
u32 vector = nvkm_rd32(device, addr + 0x160);
int i, ret;
- runl = *prunl = nvkm_runl_new(fifo, id, addr, chnum);
+ runl = nvkm_runl_new(fifo, id, addr, chnum);
if (IS_ERR(runl))
return PTR_ERR(runl);
+ *prunl = runl;
+
for (i = 0; i < 2; i++) {
u32 pbcfg = nvkm_rd32(device, addr + 0x010 + (i * 0x04));
if (pbcfg & 0x80000000) {
runl->runq[runl->runq_nr] =
nvkm_runq_new(fifo, ((pbcfg & 0x03fffc00) - 0x040000) / 0x800);
- if (!runl->runq[runl->runq_nr])
+ if (!runl->runq[runl->runq_nr]) {
+ RUNL_ERROR(runl, "runq %d", runl->runq_nr);
return -ENOMEM;
+ }
runl->runq_nr++;
}
}
+ nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist == runl->addr) {
+ if (tdev->engine < 0) {
+ RUNL_DEBUG(runl, "engn !top");
+ return -EINVAL;
+ }
+
+ engn = nvkm_runl_add(runl, tdev->engine, (tdev->type == NVKM_ENGINE_CE) ?
+ fifo->func->engn_ce : fifo->func->engn,
+ tdev->type, tdev->inst);
+ if (!engn)
+ return -EINVAL;
+
+ if (!engn->engine->func->nonstall) {
+ RUNL_DEBUG(runl, "engn %s !nonstall", engn->engine->subdev.name);
+ return -EINVAL;
+ }
+ }
+
+ if (list_empty(&runl->engns)) {
+ RUNL_DEBUG(runl, "!engns");
+ return -EINVAL;
+ }
+
ret = nvkm_inth_add(&device->vfn->intr, vector & 0x00000fff, NVKM_INTR_PRIO_NORMAL,
&fifo->engine.subdev, ga100_runl_intr, &runl->inth);
- if (ret)
+ if (ret) {
+ RUNL_ERROR(runl, "inth %d", ret);
return ret;
+ }
runl->chan = chaddr;
runl->doorbell = dbcfg >> 16;
@@ -466,9 +507,9 @@ ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prun
static irqreturn_t
ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
{
- struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), nonstall.intr);
+ struct nvkm_runl *runl = container_of(inth, typeof(*runl), nonstall.inth);
- nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+ nvkm_event_ntfy(&runl->fifo->nonstall.event, runl->id, NVKM_FIFO_NONSTALL_EVENT);
return IRQ_HANDLED;
}
@@ -476,16 +517,18 @@ static void
ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+ struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
- nvkm_inth_block(&fifo->nonstall.intr);
+ nvkm_inth_block(&runl->nonstall.inth);
}
static void
ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+ struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
- nvkm_inth_allow(&fifo->nonstall.intr);
+ nvkm_inth_allow(&runl->nonstall.inth);
}
const struct nvkm_event_func
@@ -497,9 +540,29 @@ ga100_fifo_nonstall = {
int
ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
{
- return nvkm_inth_add(&fifo->engine.subdev.device->vfn->intr, GA100_FIFO_NONSTALL_VECTOR,
- NVKM_INTR_PRIO_NORMAL, &fifo->engine.subdev, ga100_fifo_nonstall_intr,
- &fifo->nonstall.intr);
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_vfn *vfn = subdev->device->vfn;
+ struct nvkm_runl *runl;
+ int ret, nr = 0;
+
+ nvkm_runl_foreach(runl, fifo) {
+ struct nvkm_engn *engn = list_first_entry(&runl->engns, typeof(*engn), head);
+
+ runl->nonstall.vector = engn->func->nonstall(engn);
+ if (runl->nonstall.vector < 0) {
+ RUNL_ERROR(runl, "nonstall %d", runl->nonstall.vector);
+ return runl->nonstall.vector;
+ }
+
+ ret = nvkm_inth_add(&vfn->intr, runl->nonstall.vector, NVKM_INTR_PRIO_NORMAL,
+ subdev, ga100_fifo_nonstall_intr, &runl->nonstall.inth);
+ if (ret)
+ return ret;
+
+ nr = max(nr, runl->id + 1);
+ }
+
+ return nr;
}
int
@@ -514,15 +577,13 @@ ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
runl = nvkm_runl_get(fifo, -1, tdev->runlist);
if (!runl) {
ret = ga100_runl_new(fifo, id++, tdev->runlist, &runl);
- if (ret)
- return ret;
- }
-
- if (tdev->engine < 0)
- continue;
+ if (ret) {
+ if (runl)
+ nvkm_runl_del(runl);
- nvkm_runl_add(runl, tdev->engine, (tdev->type == NVKM_ENGINE_CE) ?
- fifo->func->engn_ce : fifo->func->engn, tdev->type, tdev->inst);
+ continue;
+ }
+ }
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
index 93d628d7d508..454a481a0aef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
@@ -399,7 +399,7 @@ nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
int ret;
if (!(runl = kzalloc(sizeof(*runl), GFP_KERNEL)))
- return NULL;
+ return ERR_PTR(-ENOMEM);
runl->func = fifo->func->runl;
runl->fifo = fifo;
@@ -419,7 +419,7 @@ nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
(ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->chid))) {
RUNL_ERROR(runl, "cgid/chid: %d", ret);
nvkm_runl_del(runl);
- return NULL;
+ return ERR_PTR(ret);
}
} else {
runl->cgid = nvkm_chid_ref(fifo->cgid);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
index c93d21bb7bd5..5421321f8e85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
@@ -11,6 +11,7 @@ enum nvkm_subdev_type;
struct nvkm_engn {
const struct nvkm_engn_func {
+ int (*nonstall)(struct nvkm_engn *);
bool (*chsw)(struct nvkm_engn *);
int (*cxid)(struct nvkm_engn *, bool *cgid);
void (*mmu_fault_trigger)(struct nvkm_engn *);
@@ -69,6 +70,11 @@ struct nvkm_runl {
struct nvkm_inth inth;
+ struct {
+ int vector;
+ struct nvkm_inth inth;
+ } nonstall;
+
struct list_head cgrps;
int cgrp_nr;
int chan_nr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 1dac95ae7b43..04140e0110be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -52,7 +52,7 @@ nvkm_uchan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
switch (args->v0.type) {
case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
- return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, 0,
+ return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, runl->id,
NVKM_FIFO_NONSTALL_EVENT, NULL);
case NVIF_CHAN_EVENT_V0_KILLED:
return nvkm_uevent_add(uevent, &runl->chid->event, chan->id,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index 71b824e6da9d..0096ad401b15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -109,8 +109,7 @@ nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
}
static int
-nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
- const struct nvkm_oclass *oclass,
+nvkm_gr_cclass_new(struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nvkm_gr *gr = nvkm_gr(oclass->engine);
@@ -127,6 +126,17 @@ nvkm_gr_intr(struct nvkm_engine *engine)
}
static int
+nvkm_gr_nonstall(struct nvkm_engine *engine)
+{
+ struct nvkm_gr *gr = nvkm_gr(engine);
+
+ if (gr->func->nonstall)
+ return gr->func->nonstall(gr);
+
+ return -EINVAL;
+}
+
+static int
nvkm_gr_oneinit(struct nvkm_engine *engine)
{
struct nvkm_gr *gr = nvkm_gr(engine);
@@ -178,6 +188,7 @@ nvkm_gr = {
.init = nvkm_gr_init,
.fini = nvkm_gr_fini,
.reset = nvkm_gr_reset,
+ .nonstall = nvkm_gr_nonstall,
.intr = nvkm_gr_intr,
.tile = nvkm_gr_tile,
.chsw_load = nvkm_gr_chsw_load,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 00dbeda7e346..de161e7a04aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -117,6 +117,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
extern const struct gf100_grctx_func gk110_grctx;
void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
extern const struct gf100_grctx_func gk110b_grctx;
extern const struct gf100_grctx_func gk208_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 94233d0119df..52a234b1ef01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -906,7 +906,9 @@ static void
gk104_grctx_generate_r419f78(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+
+ /* bit 3, when set, disables loads in fp helper invocations; we need them enabled */
+ nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 4391458e1fb2..3acdd9eeb74a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
}
+void
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ /* bit 3, when set, disables loads in fp helper invocations; we need them enabled */
+ nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
+}
+
const struct gf100_grctx_func
gk110_grctx = {
.main = gf100_grctx_generate_main,
@@ -854,4 +863,5 @@ gk110_grctx = {
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
.r419eb0 = gk110_grctx_generate_r419eb0,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 7b9a34f9ec3c..5597e87624ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -103,4 +103,5 @@ gk110b_grctx = {
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
.r419eb0 = gk110_grctx_generate_r419eb0,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index c78d07a8bb7d..612656496541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -568,4 +568,5 @@ gk208_grctx = {
.dist_skip_table = gf117_grctx_generate_dist_skip_table,
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r418800 = gk104_grctx_generate_r418800,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index beac66eb2a80..9906974ac3f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -988,4 +988,5 @@ gm107_grctx = {
.r406500 = gm107_grctx_generate_r406500,
.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
.r419e00 = gm107_grctx_generate_r419e00,
+ .r419f78 = gk110_grctx_generate_r419f78,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index a5b5ac2755a2..00cd70abad67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -137,8 +137,15 @@ ga102_gr_oneinit_intr(struct gf100_gr *gr, enum nvkm_intr_type *pvector)
return &device->vfn->intr;
}
+static int
+ga102_gr_nonstall(struct gf100_gr *gr)
+{
+ return nvkm_rd32(gr->base.engine.subdev.device, 0x400160) & 0x00000fff;
+}
+
static const struct gf100_gr_func
ga102_gr = {
+ .nonstall = ga102_gr_nonstall,
.oneinit_intr = ga102_gr_oneinit_intr,
.oneinit_tiles = gm200_gr_oneinit_tiles,
.oneinit_sm_id = gv100_gr_oneinit_sm_id,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5f20079c3660..3648868bb9fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -374,7 +374,7 @@ gf100_gr_chan = {
};
static int
-gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
@@ -2494,12 +2494,24 @@ gf100_gr_gpccs_ucode = {
.data.size = sizeof(gf100_grgpc_data),
};
+static int
+gf100_gr_nonstall(struct nvkm_gr *base)
+{
+ struct gf100_gr *gr = gf100_gr(base);
+
+ if (gr->func->nonstall)
+ return gr->func->nonstall(gr);
+
+ return -EINVAL;
+}
+
static const struct nvkm_gr_func
gf100_gr_ = {
.dtor = gf100_gr_dtor,
.oneinit = gf100_gr_oneinit,
.init = gf100_gr_init_,
.fini = gf100_gr_fini,
+ .nonstall = gf100_gr_nonstall,
.reset = gf100_gr_reset,
.units = gf100_gr_units,
.chan_new = gf100_gr_chan_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 94ca7ac16acf..54f686ba39ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -147,6 +147,7 @@ struct gf100_gr_func_zbc {
};
struct gf100_gr_func {
+ int (*nonstall)(struct gf100_gr *);
struct nvkm_intr *(*oneinit_intr)(struct gf100_gr *, enum nvkm_intr_type *);
void (*oneinit_tiles)(struct gf100_gr *);
int (*oneinit_sm_id)(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 81bd682c2102..ca822f07b63e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1181,7 +1181,7 @@ nv04_gr_chan = {
};
static int
-nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv04_gr *gr = nv04_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 7fe6e58f6bab..92ef7c9b2910 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -999,7 +999,7 @@ nv10_gr_chan = {
} while (0)
int
-nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv10_gr *gr = nv10_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index 5cfe927c9123..b86090c08060 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -9,6 +9,6 @@ int nv10_gr_init(struct nvkm_gr *);
void nv10_gr_intr(struct nvkm_gr *);
void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
-int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 75434f5de7ad..02a8c62a0a32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -72,7 +72,7 @@ nv20_gr_chan = {
};
static int
-nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 94685e4d4f87..d6bc6904dcc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -18,7 +18,7 @@ nv25_gr_chan = {
};
static int
-nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 2d6273675291..e5a351b51eb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -18,7 +18,7 @@ nv2a_gr_chan = {
};
static int
-nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 647bd6fede04..80370323755e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -19,7 +19,7 @@ nv30_gr_chan = {
};
static int
-nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 2eae3fe4ef4e..cdf043bbdd59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -18,7 +18,7 @@ nv34_gr_chan = {
};
static int
-nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 657d7cdba369..fa5a6ccb871d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -18,7 +18,7 @@ nv35_gr_chan = {
};
static int
-nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index d2df097a6cf6..a5e1f02791b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -145,7 +145,7 @@ nv40_gr_chan = {
};
int
-nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv40_gr *gr = nv40_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index f3d3d3a5ae5b..84fbc99139e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -22,12 +22,12 @@ u64 nv40_gr_units(struct nvkm_gr *);
struct nv40_gr_chan {
struct nvkm_object object;
struct nv40_gr *gr;
- struct nvkm_fifo_chan *fifo;
+ struct nvkm_chan *fifo;
u32 inst;
struct list_head head;
};
-int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
extern const struct nvkm_object_func nv40_gr_object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 1ba18a8e380f..c8a0288c092d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -86,7 +86,7 @@ nv50_gr_chan = {
};
int
-nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv50_gr *gr = nv50_gr(base);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 84388c42e5c6..97ead0042357 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -27,7 +27,7 @@ struct nv50_gr_chan {
struct nv50_gr *gr;
};
-int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
extern const struct nvkm_object_func nv50_gr_object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 08d5c96e6458..0884abc73a9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -5,7 +5,7 @@
#include <engine/gr.h>
#include <core/enum.h>
struct nvkm_fb_tile;
-struct nvkm_fifo_chan;
+struct nvkm_chan;
int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
bool enable, struct nvkm_gr *);
@@ -18,10 +18,11 @@ struct nvkm_gr_func {
int (*init)(struct nvkm_gr *);
int (*fini)(struct nvkm_gr *, bool);
int (*reset)(struct nvkm_gr *);
+ int (*nonstall)(struct nvkm_gr *);
void (*intr)(struct nvkm_gr *);
void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
int (*tlb_flush)(struct nvkm_gr *);
- int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
+ int (*chan_new)(struct nvkm_gr *, struct nvkm_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
/* Returns chipset-specific counts of units packed into an u64.
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 3b6c8100a242..a7775aa18541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -206,19 +206,6 @@ tu102_gr_av_to_init_veid(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
return gk20a_gr_av_to_init_(blob, 64, 0x00100000, ppack);
}
-int
-tu102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
-{
- int ret;
-
- ret = gm200_gr_load(gr, ver, fwif);
- if (ret)
- return ret;
-
- return gk20a_gr_load_net(gr, "gr/", "sw_veid_bundle_init", ver, tu102_gr_av_to_init_veid,
- &gr->bundle_veid);
-}
-
static const struct gf100_gr_fwif
tu102_gr_fwif[] = {
{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index cb0c3991b2ad..db9fc1ecae0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -81,8 +81,7 @@ nv31_mpeg_chan = {
};
int
-nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
- const struct nvkm_oclass *oclass,
+nv31_mpeg_chan_new(struct nvkm_chan *fifoch, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 9f30aaaf809e..251d659565de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -24,9 +24,9 @@ struct nv31_mpeg_func {
struct nv31_mpeg_chan {
struct nvkm_object object;
struct nv31_mpeg *mpeg;
- struct nvkm_fifo_chan *fifo;
+ struct nvkm_chan *fifo;
};
-int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+int nv31_mpeg_chan_new(struct nvkm_chan *, const struct nvkm_oclass *,
struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 0890a279458e..4b1374adbda3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -43,7 +43,7 @@ struct nv44_mpeg {
struct nv44_mpeg_chan {
struct nvkm_object object;
struct nv44_mpeg *mpeg;
- struct nvkm_fifo_chan *fifo;
+ struct nvkm_chan *fifo;
struct list_head head;
u32 inst;
};
@@ -100,8 +100,7 @@ nv44_mpeg_chan = {
};
static int
-nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
- const struct nvkm_oclass *oclass,
+nv44_mpeg_chan_new(struct nvkm_chan *fifoch, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
index 667a2d05dd89..044ff4133874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_MPEG_PRIV_H__
#define __NVKM_MPEG_PRIV_H__
#include <engine/mpeg.h>
-struct nvkm_fifo_chan;
+struct nvkm_chan;
int nv31_mpeg_init(struct nvkm_engine *);
void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
index a9d464db6974..20220d6d4a13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -74,8 +74,7 @@ nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
}
static int
-nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
- const struct nvkm_oclass *oclass,
+nvkm_sw_cclass_get(struct nvkm_chan *fifoch, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nvkm_sw *sw = nvkm_sw(oclass->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
index 834b8cbed51d..2bf45141de60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -74,7 +74,7 @@ nvkm_sw_chan = {
int
nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
- struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
+ struct nvkm_chan *fifo, const struct nvkm_oclass *oclass,
struct nvkm_sw_chan *chan)
{
unsigned long flags;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index 67b2e5ea93d9..c313aea16a17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -11,7 +11,7 @@ struct nvkm_sw_chan {
const struct nvkm_sw_chan_func *func;
struct nvkm_object object;
struct nvkm_sw *sw;
- struct nvkm_fifo_chan *fifo;
+ struct nvkm_chan *fifo;
struct list_head head;
#define NVKM_SW_CHAN_EVENT_PAGE_FLIP BIT(0)
@@ -24,7 +24,7 @@ struct nvkm_sw_chan_func {
};
int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
- struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+ struct nvkm_chan *, const struct nvkm_oclass *,
struct nvkm_sw_chan *);
bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index c3cf6f2ff86c..a0273baf4c67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -102,7 +102,7 @@ gf100_sw_chan = {
};
static int
-gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 4aa57573869c..8a1d112da894 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -106,7 +106,7 @@ nv04_sw_chan = {
};
static int
-nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifo,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv04_sw_chan *chan;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index e79e640ae535..742c75859569 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -36,7 +36,7 @@ nv10_sw_chan = {
};
static int
-nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifo,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nvkm_sw_chan *chan;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 9d7a9b7d5be3..99476d32c5af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -99,7 +99,7 @@ nv50_sw_chan = {
};
static int
-nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nvkm_disp *disp = sw->engine.subdev.device->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index d9d83b1b8849..8015afaba947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -15,7 +15,7 @@ struct nvkm_sw_chan_sclass {
};
struct nvkm_sw_func {
- int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
+ int (*chan_new)(struct nvkm_sw *, struct nvkm_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
const struct nvkm_sw_chan_sclass sclass[];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index 795f3a649b12..9b8ca4e898f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -224,7 +224,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
u64 falcons;
int ret, i;
- if (list_empty(&acr->hsfw)) {
+ if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
nvkm_debug(subdev, "No HSFW(s)\n");
nvkm_acr_cleanup(acr);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 6ba5120a2ebe..394c305e759a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -55,7 +55,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
nvkm-y += nvkm/subdev/fb/ramgm200.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
-nvkm-y += nvkm/subdev/fb/ramga102.o
+nvkm-y += nvkm/subdev/fb/ramgp102.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 0955340cc421..8a286a9349ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -174,6 +174,18 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
return 0;
}
+u64
+nvkm_fb_vidmem_size(struct nvkm_device *device)
+{
+ struct nvkm_fb *fb = device->fb;
+
+ if (fb && fb->func->vidmem.size)
+ return fb->func->vidmem.size(fb);
+
+ WARN_ON(1);
+ return 0;
+}
+
static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
index a7456e786463..12037fd4fdf2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
@@ -30,7 +30,8 @@ ga100_fb = {
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
- .ram_new = gp100_ram_new,
+ .vidmem.size = gp102_fb_vidmem_size,
+ .ram_new = gp102_ram_new,
.default_bigpage = 16,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index dd476e079fe1..76f6877b54c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -24,6 +24,12 @@
#include <engine/nvdec.h>
+static u64
+ga102_fb_vidmem_size(struct nvkm_fb *fb)
+{
+ return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
+}
+
static int
ga102_fb_oneinit(struct nvkm_fb *fb)
{
@@ -43,7 +49,8 @@ ga102_fb = {
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
- .ram_new = ga102_ram_new,
+ .vidmem.size = ga102_fb_vidmem_size,
+ .ram_new = gp102_ram_new,
.default_bigpage = 16,
.vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 14d942e8b857..534553c64805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -40,6 +40,20 @@ gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
}
+u64
+gp102_fb_vidmem_size(struct nvkm_fb *fb)
+{
+ const u32 data = nvkm_rd32(fb->subdev.device, 0x100ce0);
+ const u32 lmag = (data & 0x000003f0) >> 4;
+ const u32 lsca = (data & 0x0000000f);
+ const u64 size = (u64)lmag << (lsca + 20);
+
+ if (data & 0x40000000)
+ return size / 16 * 15;
+
+ return size;
+}
+
int
gp102_fb_oneinit(struct nvkm_fb *fb)
{
@@ -59,9 +73,10 @@ gp102_fb = {
.init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ .vidmem.size = gp102_fb_vidmem_size,
.vpr.scrub_required = gp102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
- .ram_new = gp100_ram_new,
+ .ram_new = gp102_ram_new,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 4d8a286a7a34..f422564bee5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -36,9 +36,10 @@ gv100_fb = {
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ .vidmem.size = gp102_fb_vidmem_size,
.vpr.scrub_required = gp102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
- .ram_new = gp100_ram_new,
+ .ram_new = gp102_ram_new,
.default_bigpage = 16,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 726c30c8bf95..77d6a8c10829 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -20,6 +20,10 @@ struct nvkm_fb_func {
void (*flush_page_init)(struct nvkm_fb *);
} sysmem;
+ struct nvkm_fb_func_vidmem {
+ u64 (*size)(struct nvkm_fb *);
+ } vidmem;
+
struct {
bool (*scrub_required)(struct nvkm_fb *);
int (*scrub)(struct nvkm_fb *);
@@ -84,6 +88,7 @@ void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
int gp102_fb_oneinit(struct nvkm_fb *);
+u64 gp102_fb_vidmem_size(struct nvkm_fb *);
bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index ea7d66f3dd82..50f0c1914f58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -70,5 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
-int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
new file mode 100644
index 000000000000..8550f5e47347
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+#include "ram.h"
+
+#include <subdev/bios.h>
+
+static const struct nvkm_ram_func
+gp102_ram = {
+};
+
+int
+gp102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(fb->subdev.device->bios);
+ const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+ u64 size = fb->func->vidmem.size(fb);
+ int ret;
+
+ ret = nvkm_ram_new_(&gp102_ram, fb, type, size, pram);
+ if (ret)
+ return ret;
+
+ nvkm_mm_fini(&(*pram)->vram);
+
+ return nvkm_mm_init(&(*pram)->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
+ 1);
+
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
index b8803c124c3b..bcc23d4c8115 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -36,9 +36,10 @@ tu102_fb = {
.init_page = gv100_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ .vidmem.size = gp102_fb_vidmem_size,
.vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = gp102_fb_vpr_scrub,
- .ram_new = gp100_ram_new,
+ .ram_new = gp102_ram_new,
.default_bigpage = 16,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 976539de4220..731b2f68d3db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -260,10 +260,11 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
{
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c;
+ struct nvkm_i2c_aux *aux;
struct dcb_i2c_entry ccbE;
struct dcb_output dcbE;
u8 ver, hdr;
- int ret, i;
+ int ret, i, ids;
if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
return -ENOMEM;
@@ -406,5 +407,11 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
}
}
- return nvkm_event_init(&nvkm_i2c_intr_func, &i2c->subdev, 4, i, &i2c->event);
+ ids = 0;
+ list_for_each_entry(aux, &i2c->aux, head)
+ ids = max(ids, aux->id + 1);
+ if (!ids)
+ return 0;
+
+ return nvkm_event_init(&nvkm_i2c_intr_func, &i2c->subdev, 4, ids, &i2c->event);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 524cd3c0e3fe..8e459d88ff8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -58,10 +58,13 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
if (size) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
return ret;
@@ -88,10 +91,13 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
if (size) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
return ret;
@@ -113,7 +119,10 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- mutex_lock(&vmm->mutex);
+ if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
+ return -EINVAL;
+
+ mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, addr);
if (ret = -ENOENT, !vma || vma->addr != addr) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx",
@@ -134,7 +143,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_unmap_locked(vmm, vma, false);
ret = 0;
done:
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@@ -159,13 +168,16 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
+ if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
+ return -EINVAL;
+
memory = nvkm_umem_search(client, handle);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
VMM_DEBUG(vmm, "lookup %016llx", addr);
goto fail;
@@ -198,7 +210,7 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
}
}
vma->busy = true;
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
if (ret == 0) {
@@ -207,11 +219,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
vma->busy = false;
nvkm_vmm_unmap_region(vmm, vma);
fail:
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
nvkm_memory_unref(&memory);
return ret;
}
@@ -232,7 +244,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, args->v0.addr);
if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
@@ -248,7 +260,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_put_locked(vmm, vma);
ret = 0;
done:
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@@ -275,10 +287,10 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
page, align, size, &vma);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
@@ -314,6 +326,168 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}
+static inline int
+nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ const struct nvkm_vmm_page *page;
+
+ if (likely(shift)) {
+ for (page = vmm->func->page; page->shift; page++) {
+ if (shift == page->shift)
+ break;
+ }
+
+ if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+ VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+ *refd = page - vmm->func->page;
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
+}
+
+static int
+nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma vma = {
+ .addr = args->addr,
+ .size = args->size,
+ .used = true,
+ .mapref = false,
+ .no_comp = true,
+ };
+ struct nvkm_memory *memory;
+ void *argv = (void *)(uintptr_t)args->argv;
+ unsigned int argc = args->argc;
+ u64 handle = args->memory;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ vma.page = vma.refd = refd;
+
+ memory = nvkm_umem_search(client, args->memory);
+ if (IS_ERR(memory)) {
+ VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
+ return PTR_ERR(memory);
+ }
+
+ ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
+
+ nvkm_memory_unref(&vma.memory);
+ nvkm_memory_unref(&memory);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ u8 refd;
+ int ret;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
+ args->sparse, refd);
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
+{
+ struct nvkm_vmm *vmm = uvmm->vmm;
+
+ if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
+ return -EINVAL;
+
+ return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
+}
+
+static int
+nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_raw_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!uvmm->vmm->managed.raw)
+ return -EINVAL;
+
+ if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
+ return ret;
+
+ switch (args->v0.op) {
+ case NVIF_VMM_RAW_V0_GET:
+ return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_PUT:
+ return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_MAP:
+ return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_UNMAP:
+ return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
+ case NVIF_VMM_RAW_V0_SPARSE:
+ return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
+ default:
+ return -EINVAL;
+ };
+}
+
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
@@ -326,6 +500,7 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
+ case NVIF_VMM_V0_RAW : return nvkm_uvmm_mthd_raw (uvmm, argv, argc);
case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
if (uvmm->vmm->func->mthd) {
return uvmm->vmm->func->mthd(uvmm->vmm,
@@ -366,10 +541,11 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_uvmm *uvmm;
int ret = -ENOSYS;
u64 addr, size;
- bool managed;
+ bool managed, raw;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
- managed = args->v0.managed != 0;
+ managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
+ raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
addr = args->v0.addr;
size = args->v0.size;
} else
@@ -377,12 +553,13 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
return -ENOMEM;
+
nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
*pobject = &uvmm->object;
if (!mmu->vmm) {
- ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
- NULL, "user", &uvmm->vmm);
+ ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
+ argv, argc, NULL, "user", &uvmm->vmm);
if (ret)
return ret;
@@ -393,6 +570,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
}
+ uvmm->vmm->managed.raw = raw;
page = uvmm->vmm->func->page;
args->v0.page_nr = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index ae793f400ba1..eb5fcadcb39a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -677,40 +677,17 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
}
static void
-nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, bool sparse, bool pfn)
-{
- const struct nvkm_vmm_desc_func *func = page->desc->func;
- nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
- false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
- sparse ? func->sparse : func->invalid ? func->invalid :
- func->unmap);
-}
-
-static int
-nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size, struct nvkm_vmm_map *map,
- nvkm_vmm_pte_func func)
-{
- u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
- false, nvkm_vmm_ref_ptes, func, map, NULL);
- if (fail != ~0ULL) {
- if ((size = fail - addr))
- nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+ mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
+ mutex_unlock(&vmm->mutex.map);
}
static void
@@ -718,33 +695,108 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
+ mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL);
+ mutex_unlock(&vmm->mutex.map);
}
static void
-nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
- u64 addr, u64 size)
+nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
{
nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL);
}
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ mutex_lock(&vmm->mutex.ref);
+ nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
+ mutex_unlock(&vmm->mutex.ref);
+}
+
static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
- u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
- nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+ u64 fail;
+
+ mutex_lock(&vmm->mutex.ref);
+ fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
+ nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) {
if (fail != addr)
- nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+ nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
+ mutex_unlock(&vmm->mutex.ref);
+ return -ENOMEM;
+ }
+ mutex_unlock(&vmm->mutex.ref);
+ return 0;
+}
+
+static void
+__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+ false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse, bool pfn)
+{
+ if (vmm->managed.raw) {
+ nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
+ nvkm_vmm_ptes_put(vmm, page, addr, size);
+ } else {
+ __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
+ }
+}
+
+static int
+__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+ false, nvkm_vmm_ref_ptes, func, map, NULL);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
return -ENOMEM;
}
return 0;
}
-static inline struct nvkm_vma *
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ int ret;
+
+ if (vmm->managed.raw) {
+ ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
+
+ return 0;
+ } else {
+ return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
+ }
+}
+
+struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
{
struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
@@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
vmm->debug = mmu->subdev.debug;
kref_init(&vmm->kref);
- __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+ __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
+ mutex_init(&vmm->mutex.ref);
+ mutex_init(&vmm->mutex.map);
/* Locate the smallest page size supported by the backend, it will
* have the deepest nesting of page tables.
@@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
return ret;
+ vmm->managed.p.addr = 0;
+ vmm->managed.p.size = addr;
+
/* NVKM-managed area. */
if (size) {
if (!(vma = nvkm_vma_new(addr, size)))
@@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
size = vmm->limit - addr;
if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
return ret;
+
+ vmm->managed.n.addr = addr;
+ vmm->managed.n.size = size;
} else {
/* Address-space fully managed by NVKM, requiring calls to
* nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
@@ -1362,9 +1422,9 @@ void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
if (vma->memory) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_unmap_locked(vmm, vma, false);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
}
@@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
nvkm_vmm_pte_func func;
int ret;
+ map->no_comp = vma->no_comp;
+
/* Make sure we won't overrun the end of the memory object. */
if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
@@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
int ret;
- mutex_lock(&vmm->mutex);
+
+ if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
+ vmm->managed.raw)
+ return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
vma->busy = false;
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
{
struct nvkm_vma *vma = *pvma;
if (vma) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_put_locked(vmm, vma);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
*pvma = NULL;
}
}
@@ -1769,9 +1836,49 @@ int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
{
int ret;
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
+ return ret;
+}
+
+void
+nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+ bool sparse, u8 refd)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[refd];
+
+ nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
+}
+
+void
+nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+
+ nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+
+ if (unlikely(!size))
+ return -EINVAL;
+
+ return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ int ret;
+
+ mutex_lock(&vmm->mutex.ref);
+ ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
+ mutex_unlock(&vmm->mutex.ref);
+
return ret;
}
@@ -1779,9 +1886,9 @@ void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
if (inst && vmm && vmm->func->part) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
vmm->func->part(vmm, inst);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
}
@@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
int ret = 0;
if (vmm->func->join) {
- mutex_lock(&vmm->mutex);
+ mutex_lock(&vmm->mutex.vmm);
ret = vmm->func->join(vmm, inst);
- mutex_unlock(&vmm->mutex);
+ mutex_unlock(&vmm->mutex.vmm);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index f6188aa9171c..f9bc30cdb2b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -163,6 +163,7 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *, const char *name,
struct nvkm_vmm **);
+struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
u64 addr, u64 size);
@@ -173,6 +174,30 @@ void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
+int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
+void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+ bool sparse, u8 refd);
+int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);
+
+static inline bool
+nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
+{
+ u64 p_start = vmm->managed.p.addr;
+ u64 p_end = p_start + vmm->managed.p.size;
+ u64 n_start = vmm->managed.n.addr;
+ u64 n_end = n_start + vmm->managed.n.size;
+ u64 end = start + size;
+
+ if (start >= p_start && end <= p_end)
+ return true;
+
+ if (start >= n_start && end <= n_end)
+ return true;
+
+ return false;
+}
+
#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
index 5438384d9a67..5e857c02e9aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -287,15 +287,17 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- ret = nvkm_memory_tags_get(memory, device, tags,
- nvkm_ltc_tags_clear,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
}
- if (map->tags->mn) {
+ if (!map->no_comp && map->tags->mn) {
u64 tags = map->tags->mn->offset + (map->offset >> 17);
if (page->shift == 17 || !gm20x) {
map->type |= tags << 44;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 17899fc95b2d..f3630d0e0d55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -453,15 +453,17 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- ret = nvkm_memory_tags_get(memory, device, tags,
- nvkm_ltc_tags_clear,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
}
- if (map->tags->mn) {
+ if (!map->no_comp && map->tags->mn) {
tags = map->tags->mn->offset + (map->offset >> 16);
map->ctag |= ((1ULL << page->shift) >> 16) << 36;
map->type |= tags << 36;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index b7548dcd72c7..ff08ad5005a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -296,19 +296,22 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
- ret = nvkm_memory_tags_get(memory, device, tags, NULL,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
- }
+ if (!map->no_comp) {
+ ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
- if (map->tags->mn) {
- u32 tags = map->tags->mn->offset + (map->offset >> 16);
- map->ctag |= (u64)comp << 49;
- map->type |= (u64)comp << 47;
- map->type |= (u64)tags << 49;
- map->next |= map->ctag;
+ if (map->tags->mn) {
+ u32 tags = map->tags->mn->offset +
+ (map->offset >> 16);
+ map->ctag |= (u64)comp << 49;
+ map->type |= (u64)comp << 47;
+ map->type |= (u64)tags << 49;
+ map->next |= map->ctag;
+ }
}
}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index b4ac76c9f31b..b715301ec79f 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,7 +4,7 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
- select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
default n
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index aacad5045e95..c26aab4939fa 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -4858,10 +4858,9 @@ static int dispc_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &dispc_component_ops);
}
-static int dispc_remove(struct platform_device *pdev)
+static void dispc_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dispc_component_ops);
- return 0;
}
static __maybe_unused int dispc_runtime_suspend(struct device *dev)
@@ -4913,7 +4912,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
- .remove = dispc_remove,
+ .remove_new = dispc_remove,
.driver = {
.name = "omapdss_dispc",
.pm = &dispc_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 4c1084eb0175..ea63c64d3a1a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5044,7 +5044,7 @@ err_pm_disable:
return r;
}
-static int dsi_remove(struct platform_device *pdev)
+static void dsi_remove(struct platform_device *pdev)
{
struct dsi_data *dsi = platform_get_drvdata(pdev);
@@ -5060,8 +5060,6 @@ static int dsi_remove(struct platform_device *pdev)
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
-
- return 0;
}
static __maybe_unused int dsi_runtime_suspend(struct device *dev)
@@ -5095,7 +5093,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
- .remove = dsi_remove,
+ .remove_new = dsi_remove,
.driver = {
.name = "omapdss_dsi",
.pm = &dsi_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index c4febb861910..02955f976845 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1532,7 +1532,7 @@ err_free_dss:
return r;
}
-static int dss_remove(struct platform_device *pdev)
+static void dss_remove(struct platform_device *pdev)
{
struct dss_device *dss = platform_get_drvdata(pdev);
@@ -1557,8 +1557,6 @@ static int dss_remove(struct platform_device *pdev)
dss_put_clocks(dss);
kfree(dss);
-
- return 0;
}
static void dss_shutdown(struct platform_device *pdev)
@@ -1607,7 +1605,7 @@ static const struct dev_pm_ops dss_pm_ops = {
struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
- .remove = dss_remove,
+ .remove_new = dss_remove,
.shutdown = dss_shutdown,
.driver = {
.name = "omapdss_dss",
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a8a75dc24751..a26b77d99d52 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -824,7 +824,7 @@ err_free:
return r;
}
-static int hdmi4_remove(struct platform_device *pdev)
+static void hdmi4_remove(struct platform_device *pdev)
{
struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
@@ -835,7 +835,6 @@ static int hdmi4_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
kfree(hdmi);
- return 0;
}
static const struct of_device_id hdmi_of_match[] = {
@@ -845,7 +844,7 @@ static const struct of_device_id hdmi_of_match[] = {
struct platform_driver omapdss_hdmi4hw_driver = {
.probe = hdmi4_probe,
- .remove = hdmi4_remove,
+ .remove_new = hdmi4_remove,
.driver = {
.name = "omapdss_hdmi",
.of_match_table = hdmi_of_match,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 868712cd8a3a..e6611c683857 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -798,7 +798,7 @@ err_free:
return r;
}
-static int hdmi5_remove(struct platform_device *pdev)
+static void hdmi5_remove(struct platform_device *pdev)
{
struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
@@ -809,7 +809,6 @@ static int hdmi5_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
kfree(hdmi);
- return 0;
}
static const struct of_device_id hdmi_of_match[] = {
@@ -820,7 +819,7 @@ static const struct of_device_id hdmi_of_match[] = {
struct platform_driver omapdss_hdmi5hw_driver = {
.probe = hdmi5_probe,
- .remove = hdmi5_remove,
+ .remove_new = hdmi5_remove,
.driver = {
.name = "omapdss_hdmi5",
.of_match_table = hdmi_of_match,
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 4480b69ab5a7..f163d52a7c7d 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -865,7 +865,7 @@ err_free:
return r;
}
-static int venc_remove(struct platform_device *pdev)
+static void venc_remove(struct platform_device *pdev)
{
struct venc_device *venc = platform_get_drvdata(pdev);
@@ -876,7 +876,6 @@ static int venc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
kfree(venc);
- return 0;
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
@@ -913,7 +912,7 @@ static const struct of_device_id venc_of_match[] = {
struct platform_driver omap_venchw_driver = {
.probe = venc_probe,
- .remove = venc_remove,
+ .remove_new = venc_remove,
.driver = {
.name = "omapdss_venc",
.pm = &venc_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 61a27dd7392e..9753c1e1f994 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -723,7 +723,7 @@ bool dmm_is_available(void)
return omap_dmm ? true : false;
}
-static int omap_dmm_remove(struct platform_device *dev)
+static void omap_dmm_remove(struct platform_device *dev)
{
struct tiler_block *block, *_block;
int i;
@@ -763,8 +763,6 @@ static int omap_dmm_remove(struct platform_device *dev)
kfree(omap_dmm);
omap_dmm = NULL;
}
-
- return 0;
}
static int omap_dmm_probe(struct platform_device *dev)
@@ -982,8 +980,7 @@ static int omap_dmm_probe(struct platform_device *dev)
return 0;
fail:
- if (omap_dmm_remove(dev))
- dev_err(&dev->dev, "cleanup failed\n");
+ omap_dmm_remove(dev);
return ret;
}
@@ -1213,7 +1210,7 @@ static const struct of_device_id dmm_of_match[] = {
struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
- .remove = omap_dmm_remove,
+ .remove_new = omap_dmm_remove,
.driver = {
.owner = THIS_MODULE,
.name = DMM_DRIVER_NAME,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 5ed549726104..afeeb7737552 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -636,17 +636,7 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static const struct file_operations omapdriver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
- .mmap = omap_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(omapdriver_fops);
static const struct drm_driver omap_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
@@ -655,8 +645,6 @@ static const struct drm_driver omap_drm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = omap_gem_prime_import,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
@@ -821,14 +809,12 @@ static int pdev_probe(struct platform_device *pdev)
return ret;
}
-static int pdev_remove(struct platform_device *pdev)
+static void pdev_remove(struct platform_device *pdev)
{
struct omap_drm_private *priv = platform_get_drvdata(pdev);
omapdrm_cleanup(priv);
kfree(priv);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -859,7 +845,7 @@ static struct platform_driver pdev = {
.pm = &omapdrm_pm_ops,
},
.probe = pdev_probe,
- .remove = pdev_remove,
+ .remove_new = pdev_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b7ccce0704a3..6b08b137af1a 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -76,6 +76,15 @@ fallback:
return drm_fb_helper_pan_display(var, fbi);
}
+static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_framebuffer *fb = helper->fb;
+ struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
+
+ return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
+}
+
static void omap_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@@ -97,14 +106,16 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
- FB_DEFAULT_SYS_OPS,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_ioctl = drm_fb_helper_ioctl,
- .fb_destroy = omap_fbdev_fb_destroy,
+ .fb_mmap = omap_fbdev_fb_mmap,
+ .fb_destroy = omap_fbdev_fb_destroy,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -196,6 +207,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
+ fbi->flags |= FBINFO_VIRTFB;
fbi->screen_buffer = omap_gem_vaddr(bo);
fbi->screen_size = bo->size;
fbi->fix.smem_start = dma_addr;
@@ -318,10 +330,6 @@ void omap_fbdev_setup(struct drm_device *dev)
INIT_WORK(&fbdev->work, pan_worker);
- ret = omap_fbdev_client_hotplug(&helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&helper->client);
return;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 6b58a5bb7b44..c48fa531ca32 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -524,26 +524,11 @@ fail:
return ret;
}
-/** We override mainly to fix up some of the vm mapping flags.. */
-int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret) {
- DBG("mmap failed: %d", ret);
- return ret;
- }
-
- return omap_gem_mmap_obj(vma->vm_private_data, vma);
-}
-
-int omap_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
+static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
@@ -563,12 +548,14 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- vma->vm_pgoff = 0;
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return 0;
}
@@ -1282,6 +1269,7 @@ static const struct vm_operations_struct omap_gem_vm_ops = {
static const struct drm_gem_object_funcs omap_gem_object_funcs = {
.free = omap_gem_free_object,
.export = omap_gem_prime_export,
+ .mmap = omap_gem_object_mmap,
.vm_ops = &omap_gem_vm_ops,
};
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index 4d4488939f6b..fec3fa0e4c33 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -57,9 +57,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
/* mmap() Interface */
-int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int omap_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 3abc47521b2c..36f9ee4baad3 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -64,15 +64,8 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = buffer->priv;
- int ret = 0;
- dma_resv_assert_held(buffer->resv);
-
- ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
- if (ret < 0)
- return ret;
-
- return omap_gem_mmap_obj(obj, vma);
+ return drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
}
static const struct dma_buf_ops omap_dmabuf_ops = {
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 203c0ef0bbfd..869e535faefa 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -300,6 +300,7 @@ config DRM_PANEL_LEADTEK_LTK500HD1829
config DRM_PANEL_SAMSUNG_LD9040
tristate "Samsung LD9040 RGB/SPI panel"
depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
config DRM_PANEL_LG_LB035Q02
@@ -733,6 +734,17 @@ config DRM_PANEL_SONY_TULIP_TRULY_NT35521
NT35521 1280x720 video mode panel as found on Sony Xperia M4
Aqua phone.
+config DRM_PANEL_STARTEK_KD070FHFID015
+ tristate "STARTEK KD070FHFID015 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for STARTEK KD070FHFID015 DSI panel
+ based on RENESAS-R69429 controller. The panel is a 7-inch TFT LCD display
+ with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to
+ the host, a built-in LED backlight and touch controller.
+
config DRM_PANEL_TDO_TL070WSH30
tristate "TDO TL070WSH30 DSI panel"
depends on OF
@@ -793,6 +805,17 @@ config DRM_PANEL_VISIONOX_VTDR6130
Say Y here if you want to enable support for Visionox
VTDR6130 1080x2400 AMOLED DSI panel.
+config DRM_PANEL_VISIONOX_R66451
+ tristate "Visionox R66451"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for Visionox
+ R66451 1080x2340 AMOLED DSI panel.
+
config DRM_PANEL_WIDECHIPS_WS2401
tristate "Widechips WS2401 DPI panel driver"
depends on SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 30cf553c8d1d..433e93d57949 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
+obj-$(CONFIG_DRM_PANEL_STARTEK_KD070FHFID015) += panel-startek-kd070fhfid015.o
obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
@@ -81,5 +82,6 @@ obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_R66451) += panel-visionox-r66451.o
obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o
obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 1cc0f1d09684..662c7bcbe6e5 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -11,7 +11,8 @@
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
index 3c976a98de6a..6c86ebf2cad7 100644
--- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -11,8 +11,8 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index d879b3b14c48..11b64acbe8a9 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 3cc9fb0d4f5d..5ac926281d2c 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -8,7 +8,6 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_connector.h>
@@ -2139,9 +2138,9 @@ static const struct panel_desc starry_himax83102_j02_desc = {
static const struct drm_display_mode starry_ili9882t_default_mode = {
.clock = 165280,
.hdisplay = 1200,
- .hsync_start = 1200 + 32,
- .hsync_end = 1200 + 32 + 30,
- .htotal = 1200 + 32 + 30 + 32,
+ .hsync_start = 1200 + 72,
+ .hsync_end = 1200 + 72 + 30,
+ .htotal = 1200 + 72 + 30 + 72,
.vdisplay = 1920,
.vsync_start = 1920 + 68,
.vsync_end = 1920 + 68 + 2,
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index ba17bcc4461c..6b3f4d664d2a 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -11,7 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_connector.h>
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index fbd114b4f0be..feb665df35a1 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1866,6 +1866,7 @@ static const struct panel_delay delay_200_500_e200 = {
*/
static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
@@ -1889,6 +1890,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"),
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 76572c922983..986e3e192881 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -7,7 +7,6 @@
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index df493da50afe..48e3acaecdf3 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -11,7 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/regulator/consumer.h>
#define FEIYANG_INIT_CMD_LEN 2
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index d4fb5d1b295b..c73243d85de7 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -15,7 +15,7 @@
#include <linux/media-bus-format.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 3dfafa585127..61c872f0f7ca 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -22,7 +22,8 @@
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 3fdf884b3257..3574681891e8 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -23,7 +23,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 1ec696adf9de..7838947a1bf3 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -9,7 +9,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -455,6 +455,174 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
};
+static const struct ili9881c_instr tl050hdv35_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x1d),
+ ILI9881C_COMMAND_INSTR(0x10, 0x1d),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x02),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x60, 0x00),
+ ILI9881C_COMMAND_INSTR(0x61, 0x15),
+ ILI9881C_COMMAND_INSTR(0x62, 0x14),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x66, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x67, 0x06),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x07),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x01),
+ ILI9881C_COMMAND_INSTR(0x76, 0x00),
+ ILI9881C_COMMAND_INSTR(0x77, 0x14),
+ ILI9881C_COMMAND_INSTR(0x78, 0x15),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x07),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2b),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x18),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x06),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+ ILI9881C_COMMAND_INSTR(0x35, 0x1f),
+ ILI9881C_COMMAND_INSTR(0x33, 0x14),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x98),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x33),
+ ILI9881C_COMMAND_INSTR(0x53, 0xa2),
+ ILI9881C_COMMAND_INSTR(0x55, 0x92),
+ ILI9881C_COMMAND_INSTR(0x50, 0x96),
+ ILI9881C_COMMAND_INSTR(0x51, 0x96),
+ ILI9881C_COMMAND_INSTR(0x60, 0x22),
+ ILI9881C_COMMAND_INSTR(0x61, 0x00),
+ ILI9881C_COMMAND_INSTR(0x62, 0x19),
+ ILI9881C_COMMAND_INSTR(0x63, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x11),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x19),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x0d),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x0d),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x14),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x17),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x27),
+ ILI9881C_COMMAND_INSTR(0xab, 0x49),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4c),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x22),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x60),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x11),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x19),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x0d),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x0d),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x14),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x17),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xca, 0x27),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x49),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4c),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x33),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x60),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+ ILI9881C_COMMAND_INSTR(0x36, 0x03),
+};
+
static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -812,6 +980,23 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.height_mm = 217,
};
+static const struct drm_display_mode tl050hdv35_default_mode = {
+ .clock = 59400,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 18,
+ .hsync_end = 720 + 18 + 3,
+ .htotal = 720 + 18 + 3 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 26,
+ .vsync_end = 1280 + 26 + 6,
+ .vtotal = 1280 + 26 + 6 + 28,
+
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
static const struct drm_display_mode w552946aba_default_mode = {
.clock = 64000,
@@ -944,6 +1129,14 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
};
+static const struct ili9881c_desc tl050hdv35_desc = {
+ .init = tl050hdv35_init,
+ .init_length = ARRAY_SIZE(tl050hdv35_init),
+ .mode = &tl050hdv35_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+};
+
static const struct ili9881c_desc w552946aba_desc = {
.init = w552946ab_init,
.init_length = ARRAY_SIZE(w552946ab_init),
@@ -955,6 +1148,7 @@ static const struct ili9881c_desc w552946aba_desc = {
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
+ { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ }
};
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index b2b0ebc9e943..8fdbda59be48 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -11,7 +11,8 @@
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 9992d0d4c0e5..485178a99910 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -7,7 +7,6 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 323c33c9c37a..4879835fe101 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -16,7 +16,7 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#define JD9365DA_INIT_CMD_LEN 2
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 8f4f137a2af6..213008499caa 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -404,38 +404,30 @@ static int jdi_panel_add(struct jdi_panel *jdi)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies),
jdi->supplies);
- if (ret < 0) {
- dev_err(dev, "failed to init regulator, ret=%d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to init regulator, ret=%d\n", ret);
jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(jdi->enable_gpio)) {
- ret = PTR_ERR(jdi->enable_gpio);
- dev_err(dev, "cannot get enable-gpio %d\n", ret);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(jdi->enable_gpio),
+ "cannot get enable-gpio %d\n", ret);
}
jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(jdi->reset_gpio)) {
- ret = PTR_ERR(jdi->reset_gpio);
- dev_err(dev, "cannot get reset-gpios %d\n", ret);
- return ret;
- }
+ if (IS_ERR(jdi->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(jdi->reset_gpio),
+ "cannot get reset-gpios %d\n", ret);
jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW);
- if (IS_ERR(jdi->dcdc_en_gpio)) {
- ret = PTR_ERR(jdi->dcdc_en_gpio);
- dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret);
- return ret;
- }
+ if (IS_ERR(jdi->dcdc_en_gpio))
+ return dev_err_probe(dev, PTR_ERR(jdi->dcdc_en_gpio),
+ "cannot get dcdc-en-gpio %d\n", ret);
jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi);
- if (IS_ERR(jdi->backlight)) {
- ret = PTR_ERR(jdi->backlight);
- dev_err(dev, "failed to register backlight %d\n", ret);
- return ret;
- }
+ if (IS_ERR(jdi->backlight))
+ return dev_err_probe(dev, PTR_ERR(jdi->backlight),
+ "failed to register backlight %d\n", ret);
drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index d2efd887484b..d41482d3a34f 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -8,7 +8,6 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index de8758c30e6e..1b8e3156914c 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -10,7 +10,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -228,15 +228,13 @@ static int panel_lvds_probe(struct platform_device *pdev)
return 0;
}
-static int panel_lvds_remove(struct platform_device *pdev)
+static void panel_lvds_remove(struct platform_device *pdev)
{
struct panel_lvds *lvds = platform_get_drvdata(pdev);
drm_panel_remove(&lvds->panel);
drm_panel_disable(&lvds->panel);
-
- return 0;
}
static const struct of_device_id panel_lvds_of_table[] = {
@@ -248,7 +246,7 @@ MODULE_DEVICE_TABLE(of, panel_lvds_of_table);
static struct platform_driver panel_lvds_driver = {
.probe = panel_lvds_probe,
- .remove = panel_lvds_remove,
+ .remove_new = panel_lvds_remove,
.driver = {
.name = "panel-lvds",
.of_match_table = panel_lvds_of_table,
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
index 26d358b9b85a..799c2161fc85 100644
--- a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -18,7 +18,6 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 9243b2ad828d..ea4a6bf6d35b 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -10,7 +10,7 @@
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index a07958038ffd..ad98dd9322b4 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -13,7 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index cf078f0d3cd3..71e57de6d8b2 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -11,7 +11,8 @@
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 493c3c23f0d6..d6dceb858008 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -26,7 +26,7 @@
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index cc7f96d70826..5bbea734123b 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -18,7 +18,6 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 8b108ac80b55..412ca84d0581 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index c3befa7f253d..9632b9e95b71 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 73bcffa1e0c1..33fb3d715e54 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index f58cfb10b58a..059260262b5a 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -12,7 +12,6 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index e46be5014d42..c415dacf1816 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -12,7 +12,6 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 90ea91e4311d..4618c892cdd6 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -47,7 +47,6 @@
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pm.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index 117b26845083..14c6700e37b3 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -56,10 +56,6 @@ struct db7430 {
struct mipi_dbi dbi;
/** @panel: the DRM panel instance for this device */
struct drm_panel panel;
- /** @width: the width of this panel in mm */
- u32 width;
- /** @height: the height of this panel in mm */
- u32 height;
/** @reset: reset GPIO line */
struct gpio_desc *reset;
/** @regulators: VCCIO and VIO supply regulators */
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 01eb211f32f7..9f438683a6f6 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -8,6 +8,7 @@
* Andrzej Hajda <[email protected]>
*/
+#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
@@ -180,15 +181,15 @@ static void ld9040_init(struct ld9040 *ctx)
{
ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
- 0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
- 0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
- 0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
+ 0x05, 0x5e, 0x96, 0x6b, 0x7d, 0x0d, 0x3f, 0x00,
+ 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x05, 0x1f, 0x1f, 0x1f, 0x00, 0x00);
ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
- 0x02, 0x08, 0x08, 0x10, 0x10);
+ 0x02, 0x06, 0x0a, 0x10, 0x10);
ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
- ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
+ ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0f, 0x00, 0x16);
ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
ld9040_brightness_set(ctx);
ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
@@ -310,8 +311,30 @@ static int ld9040_parse_dt(struct ld9040 *ctx)
return 0;
}
+static int ld9040_bl_update_status(struct backlight_device *dev)
+{
+ struct ld9040 *ctx = bl_get_data(dev);
+
+ ctx->brightness = backlight_get_brightness(dev);
+ ld9040_brightness_set(ctx);
+
+ return 0;
+}
+
+static const struct backlight_ops ld9040_bl_ops = {
+ .update_status = ld9040_bl_update_status,
+};
+
+static const struct backlight_properties ld9040_bl_props = {
+ .type = BACKLIGHT_RAW,
+ .scale = BACKLIGHT_SCALE_NON_LINEAR,
+ .max_brightness = ARRAY_SIZE(ld9040_gammas) - 1,
+ .brightness = ARRAY_SIZE(ld9040_gammas) - 1,
+};
+
static int ld9040_probe(struct spi_device *spi)
{
+ struct backlight_device *bldev;
struct device *dev = &spi->dev;
struct ld9040 *ctx;
int ret;
@@ -323,7 +346,7 @@ static int ld9040_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->dev = dev;
- ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
+ ctx->brightness = ld9040_bl_props.brightness;
ret = ld9040_parse_dt(ctx);
if (ret < 0)
@@ -353,6 +376,12 @@ static int ld9040_probe(struct spi_device *spi)
drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
+ bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
+ ctx, &ld9040_bl_ops,
+ &ld9040_bl_props);
+ if (IS_ERR(bldev))
+ return PTR_ERR(bldev);
+
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 008e2b0d6652..79f611963c61 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -11,7 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
struct s6d16d0 {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index 102e1fc7ee38..ea5a85779382 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>
@@ -66,7 +65,6 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- int ret = 0;
if (lock) {
mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
@@ -80,7 +78,7 @@ static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
}
- return ret;
+ return 0;
}
static int s6d7aa0_on(struct s6d7aa0 *ctx)
@@ -569,6 +567,7 @@ static const struct of_device_id s6d7aa0_of_match[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, s6d7aa0_of_match);
static struct mipi_dsi_driver s6d7aa0_driver = {
.probe = s6d7aa0_probe,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 39eef3dce7c9..639a4fdf57bb 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -12,7 +12,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
index ed3895e4ca5e..a89d925fdfb2 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 1ebb79e3103c..cbf9607dd576 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -8,7 +8,6 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index c250ca36a5b3..658c7c040570 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -278,14 +278,12 @@ static int seiko_panel_probe(struct device *dev,
return 0;
}
-static int seiko_panel_remove(struct platform_device *pdev)
+static void seiko_panel_remove(struct platform_device *pdev)
{
struct seiko_panel *panel = platform_get_drvdata(pdev);
drm_panel_remove(&panel->base);
drm_panel_disable(&panel->base);
-
- return 0;
}
static void seiko_panel_shutdown(struct platform_device *pdev)
@@ -347,7 +345,7 @@ static struct platform_driver seiko_panel_platform_driver = {
.of_match_table = platform_of_match,
},
.probe = seiko_panel_platform_probe,
- .remove = seiko_panel_remove,
+ .remove_new = seiko_panel_remove,
.shutdown = seiko_panel_shutdown,
};
module_platform_driver(seiko_panel_platform_driver);
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index a07d0f6c3e69..76bd9e810827 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -189,15 +189,13 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return 0;
}
-static int ls037v7dw01_remove(struct platform_device *pdev)
+static void ls037v7dw01_remove(struct platform_device *pdev)
{
struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev);
drm_panel_remove(&lcd->panel);
drm_panel_disable(&lcd->panel);
drm_panel_unprepare(&lcd->panel);
-
- return 0;
}
static const struct of_device_id ls037v7dw01_of_match[] = {
@@ -209,7 +207,7 @@ MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
static struct platform_driver ls037v7dw01_driver = {
.probe = ls037v7dw01_probe,
- .remove = ls037v7dw01_remove,
+ .remove_new = ls037v7dw01_remove,
.driver = {
.name = "panel-sharp-ls037v7dw01",
.of_match_table = ls037v7dw01_of_match,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a247a0e7c799..95959dcc6e0e 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -141,7 +141,6 @@ struct panel_simple {
bool prepared;
- ktime_t prepared_time;
ktime_t unprepared_time;
const struct panel_desc *desc;
@@ -351,8 +350,6 @@ static int panel_simple_resume(struct device *dev)
if (p->desc->delay.prepare)
msleep(p->desc->delay.prepare);
- p->prepared_time = ktime_get_boottime();
-
return 0;
}
@@ -566,7 +563,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
- panel->prepared_time = 0;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
@@ -999,21 +995,21 @@ static const struct panel_desc auo_g104sn02 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode auo_g121ean01_mode = {
- .clock = 66700,
- .hdisplay = 1280,
- .hsync_start = 1280 + 58,
- .hsync_end = 1280 + 58 + 8,
- .htotal = 1280 + 58 + 8 + 70,
- .vdisplay = 800,
- .vsync_start = 800 + 6,
- .vsync_end = 800 + 6 + 4,
- .vtotal = 800 + 6 + 4 + 10,
+static const struct display_timing auo_g121ean01_timing = {
+ .pixelclock = { 60000000, 74400000, 90000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 20, 50, 100 },
+ .hback_porch = { 20, 50, 100 },
+ .hsync_len = { 30, 100, 200 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 2, 10, 25 },
+ .vback_porch = { 2, 10, 25 },
+ .vsync_len = { 4, 18, 50 },
};
static const struct panel_desc auo_g121ean01 = {
- .modes = &auo_g121ean01_mode,
- .num_modes = 1,
+ .timings = &auo_g121ean01_timing,
+ .num_timings = 1,
.bpc = 8,
.size = {
.width = 261,
@@ -1189,7 +1185,9 @@ static const struct panel_desc auo_t215hvn01 = {
.delay = {
.disable = 5,
.unprepare = 1000,
- }
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode avic_tm070ddh03_mode = {
@@ -2178,6 +2176,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
@@ -2377,6 +2376,37 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
+static const struct display_timing innolux_g156hce_l01_timings = {
+ .pixelclock = { 120000000, 141860000, 150000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 80, 90, 100 },
+ .hback_porch = { 80, 90, 100 },
+ .hsync_len = { 20, 30, 30 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 3, 10, 20 },
+ .vback_porch = { 3, 10, 20 },
+ .vsync_len = { 4, 10, 10 },
+};
+
+static const struct panel_desc innolux_g156hce_l01 = {
+ .timings = &innolux_g156hce_l01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .delay = {
+ .prepare = 1, /* T1+T2 */
+ .enable = 450, /* T5 */
+ .disable = 200, /* T6 */
+ .unprepare = 10, /* T3+T7 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode innolux_n156bge_l21_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -3202,11 +3232,13 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
.vsync_start = 480 + 49,
.vsync_end = 480 + 49 + 2,
.vtotal = 480 + 49 + 2 + 22,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc powertip_ph800480t013_idf02 = {
.modes = &powertip_ph800480t013_idf02_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 152,
.height = 91,
@@ -4242,6 +4274,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
+ .compatible = "innolux,g156hce-l01",
+ .data = &innolux_g156hce_l01,
+ }, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
@@ -4457,20 +4492,18 @@ MODULE_DEVICE_TABLE(of, platform_of_match);
static int panel_simple_platform_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
+ const struct panel_desc *desc;
- id = of_match_node(platform_of_match, pdev->dev.of_node);
- if (!id)
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
return -ENODEV;
- return panel_simple_probe(&pdev->dev, id->data);
+ return panel_simple_probe(&pdev->dev, desc);
}
-static int panel_simple_platform_remove(struct platform_device *pdev)
+static void panel_simple_platform_remove(struct platform_device *pdev)
{
panel_simple_remove(&pdev->dev);
-
- return 0;
}
static void panel_simple_platform_shutdown(struct platform_device *pdev)
@@ -4491,7 +4524,7 @@ static struct platform_driver panel_simple_platform_driver = {
.pm = &panel_simple_pm_ops,
},
.probe = panel_simple_platform_probe,
- .remove = panel_simple_platform_remove,
+ .remove_new = panel_simple_platform_remove,
.shutdown = panel_simple_platform_shutdown,
};
@@ -4736,15 +4769,12 @@ MODULE_DEVICE_TABLE(of, dsi_of_match);
static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct panel_desc_dsi *desc;
- const struct of_device_id *id;
int err;
- id = of_match_node(dsi_of_match, dsi->dev.of_node);
- if (!id)
+ desc = of_device_get_match_data(&dsi->dev);
+ if (!desc)
return -ENODEV;
- desc = id->data;
-
err = panel_simple_probe(&dsi->dev, &desc->desc);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 7eae83aa0ea1..0459965e1b4f 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -12,7 +12,7 @@
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 3aa31f3d6157..6a3945639535 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -13,7 +13,7 @@
#include <linux/media-bus-format.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index bbc4569cbcdc..88e80fe98112 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -10,14 +10,12 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#define ST7789V_COLMOD_RGB_FMT_18BITS (6 << 4)
-#define ST7789V_COLMOD_CTRL_FMT_18BITS (6 << 0)
-
#define ST7789V_RAMCTRL_CMD 0xb0
#define ST7789V_RAMCTRL_RM_RGB BIT(4)
#define ST7789V_RAMCTRL_DM_RGB BIT(0)
@@ -29,7 +27,8 @@
#define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5)
#define ST7789V_RGBCTRL_VSYNC_HIGH BIT(3)
#define ST7789V_RGBCTRL_HSYNC_HIGH BIT(2)
-#define ST7789V_RGBCTRL_PCLK_HIGH BIT(1)
+#define ST7789V_RGBCTRL_PCLK_FALLING BIT(1)
+#define ST7789V_RGBCTRL_DE_LOW BIT(0)
#define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f)
#define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f)
@@ -111,11 +110,26 @@
return val; \
} while (0)
+#define ST7789V_IDS { 0x85, 0x85, 0x52 }
+#define ST7789V_IDS_SIZE 3
+
+struct st7789_panel_info {
+ const struct drm_display_mode *mode;
+ u32 bus_format;
+ u32 bus_flags;
+ bool invert_mode;
+ bool partial_mode;
+ u16 partial_start;
+ u16 partial_end;
+};
+
struct st7789v {
struct drm_panel panel;
+ const struct st7789_panel_info *info;
struct spi_device *spi;
struct gpio_desc *reset;
struct regulator *power;
+ enum drm_panel_orientation orientation;
};
enum st7789v_prefix {
@@ -132,17 +146,12 @@ static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix,
u8 data)
{
struct spi_transfer xfer = { };
- struct spi_message msg;
u16 txbuf = ((prefix & 1) << 8) | data;
- spi_message_init(&msg);
-
xfer.tx_buf = &txbuf;
- xfer.bits_per_word = 9;
xfer.len = sizeof(txbuf);
- spi_message_add_tail(&xfer, &msg);
- return spi_sync(ctx->spi, &msg);
+ return spi_sync_transfer(ctx->spi, &xfer, 1);
}
static int st7789v_write_command(struct st7789v *ctx, u8 cmd)
@@ -155,6 +164,76 @@ static int st7789v_write_data(struct st7789v *ctx, u8 cmd)
return st7789v_spi_write(ctx, ST7789V_DATA, cmd);
}
+static int st7789v_read_data(struct st7789v *ctx, u8 cmd, u8 *buf,
+ unsigned int len)
+{
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ u16 txbuf = ((ST7789V_COMMAND & 1) << 8) | cmd;
+ u16 rxbuf[4] = {};
+ u8 bit9 = 0;
+ int ret, i;
+
+ switch (len) {
+ case 1:
+ case 3:
+ case 4:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ spi_message_init(&msg);
+
+ xfer[0].tx_buf = &txbuf;
+ xfer[0].len = sizeof(txbuf);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ xfer[1].rx_buf = rxbuf;
+ xfer[1].len = len * 2;
+ spi_message_add_tail(&xfer[1], &msg);
+
+ ret = spi_sync(ctx->spi, &msg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = rxbuf[i] >> i | (bit9 << (9 - i));
+ if (i)
+ bit9 = rxbuf[i] & GENMASK(i - 1, 0);
+ }
+
+ return 0;
+}
+
+static int st7789v_check_id(struct drm_panel *panel)
+{
+ const u8 st7789v_ids[ST7789V_IDS_SIZE] = ST7789V_IDS;
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ bool invalid_ids = false;
+ int ret, i;
+ u8 ids[3];
+
+ if (ctx->spi->mode & SPI_NO_RX)
+ return 0;
+
+ ret = st7789v_read_data(ctx, MIPI_DCS_GET_DISPLAY_ID, ids, ST7789V_IDS_SIZE);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ST7789V_IDS_SIZE; i++) {
+ if (ids[i] != st7789v_ids[i]) {
+ invalid_ids = true;
+ break;
+ }
+ }
+
+ if (invalid_ids)
+ return -EIO;
+
+ return 0;
+}
+
static const struct drm_display_mode default_mode = {
.clock = 7000,
.hdisplay = 240,
@@ -165,18 +244,102 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 320 + 8,
.vsync_end = 320 + 8 + 4,
.vtotal = 320 + 8 + 4 + 4,
+ .width_mm = 61,
+ .height_mm = 103,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct drm_display_mode t28cp45tn89_mode = {
+ .clock = 6008,
+ .hdisplay = 240,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
+ .vdisplay = 320,
+ .vsync_start = 320 + 8,
+ .vsync_end = 320 + 8 + 4,
+ .vtotal = 320 + 8 + 4 + 4,
+ .width_mm = 43,
+ .height_mm = 57,
+ .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct drm_display_mode et028013dma_mode = {
+ .clock = 3000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
+ .vdisplay = 320,
+ .vsync_start = 320 + 8,
+ .vsync_end = 320 + 8 + 4,
+ .vtotal = 320 + 8 + 4 + 4,
+ .width_mm = 43,
+ .height_mm = 58,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
+ .clock = 6000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 28,
+ .hsync_end = 240 + 28 + 10,
+ .htotal = 240 + 28 + 10 + 10,
+ .vdisplay = 280,
+ .vsync_start = 280 + 8,
+ .vsync_end = 280 + 8 + 4,
+ .vtotal = 280 + 8 + 4 + 4,
+ .width_mm = 43,
+ .height_mm = 37,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct st7789_panel_info default_panel = {
+ .mode = &default_mode,
+ .invert_mode = true,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
+static const struct st7789_panel_info t28cp45tn89_panel = {
+ .mode = &t28cp45tn89_mode,
+ .invert_mode = false,
+ .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+};
+
+static const struct st7789_panel_info et028013dma_panel = {
+ .mode = &et028013dma_mode,
+ .invert_mode = true,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+};
+
+static const struct st7789_panel_info jt240mhqs_hwt_ek_e3_panel = {
+ .mode = &jt240mhqs_hwt_ek_e3_mode,
+ .invert_mode = true,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+ .partial_mode = true,
+ .partial_start = 38,
+ .partial_end = 318,
};
static int st7789v_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
+ struct st7789v *ctx = panel_to_st7789v(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->info->mode);
if (!mode) {
- dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ ctx->info->mode->hdisplay, ctx->info->mode->vdisplay,
+ drm_mode_vrefresh(ctx->info->mode));
return -ENOMEM;
}
@@ -185,17 +348,65 @@ static int st7789v_get_modes(struct drm_panel *panel,
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- connector->display_info.width_mm = 61;
- connector->display_info.height_mm = 103;
+ connector->display_info.bpc = 6;
+ connector->display_info.width_mm = ctx->info->mode->width_mm;
+ connector->display_info.height_mm = ctx->info->mode->height_mm;
+ connector->display_info.bus_flags = ctx->info->bus_flags;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &ctx->info->bus_format, 1);
+
+ /*
+ * TODO: Remove once all drm drivers call
+ * drm_connector_set_orientation_from_panel()
+ */
+ drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
+static enum drm_panel_orientation st7789v_get_orientation(struct drm_panel *p)
+{
+ struct st7789v *ctx = panel_to_st7789v(p);
+
+ return ctx->orientation;
+}
+
static int st7789v_prepare(struct drm_panel *panel)
{
struct st7789v *ctx = panel_to_st7789v(panel);
+ u8 mode, pixel_fmt, polarity;
int ret;
+ if (!ctx->info->partial_mode)
+ mode = ST7789V_RGBCTRL_WO;
+ else
+ mode = 0;
+
+ switch (ctx->info->bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ pixel_fmt = MIPI_DCS_PIXEL_FMT_18BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ pixel_fmt = MIPI_DCS_PIXEL_FMT_16BIT;
+ break;
+ default:
+ dev_err(panel->dev, "unsupported bus format: %d\n",
+ ctx->info->bus_format);
+ return -EINVAL;
+ }
+
+ pixel_fmt = (pixel_fmt << 4) | pixel_fmt;
+
+ polarity = 0;
+ if (ctx->info->mode->flags & DRM_MODE_FLAG_PVSYNC)
+ polarity |= ST7789V_RGBCTRL_VSYNC_HIGH;
+ if (ctx->info->mode->flags & DRM_MODE_FLAG_PHSYNC)
+ polarity |= ST7789V_RGBCTRL_HSYNC_HIGH;
+ if (ctx->info->bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE)
+ polarity |= ST7789V_RGBCTRL_PCLK_FALLING;
+ if (ctx->info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ polarity |= ST7789V_RGBCTRL_DE_LOW;
+
ret = regulator_enable(ctx->power);
if (ret)
return ret;
@@ -205,6 +416,14 @@ static int st7789v_prepare(struct drm_panel *panel)
gpiod_set_value(ctx->reset, 0);
msleep(120);
+ /*
+ * Avoid failing if the IDs are invalid in case the Rx bus width
+ * description is missing.
+ */
+ ret = st7789v_check_id(panel);
+ if (ret)
+ dev_warn(panel->dev, "Unrecognized panel IDs");
+
ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE));
/* We need to wait 120ms after a sleep out command */
@@ -216,9 +435,7 @@ static int st7789v_prepare(struct drm_panel *panel)
ST7789V_TEST(ret, st7789v_write_command(ctx,
MIPI_DCS_SET_PIXEL_FORMAT));
- ST7789V_TEST(ret, st7789v_write_data(ctx,
- (MIPI_DCS_PIXEL_FMT_18BIT << 4) |
- (MIPI_DCS_PIXEL_FMT_18BIT)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, pixel_fmt));
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD));
ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
@@ -296,7 +513,44 @@ static int st7789v_prepare(struct drm_panel *panel)
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b)));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28)));
- ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_INVERT_MODE));
+ if (ctx->info->invert_mode) {
+ ST7789V_TEST(ret, st7789v_write_command(ctx,
+ MIPI_DCS_ENTER_INVERT_MODE));
+ } else {
+ ST7789V_TEST(ret, st7789v_write_command(ctx,
+ MIPI_DCS_EXIT_INVERT_MODE));
+ }
+
+ if (ctx->info->partial_mode) {
+ u8 area_data[4] = {
+ (ctx->info->partial_start >> 8) & 0xff,
+ (ctx->info->partial_start >> 0) & 0xff,
+ ((ctx->info->partial_end - 1) >> 8) & 0xff,
+ ((ctx->info->partial_end - 1) >> 0) & 0xff,
+ };
+
+ /* Caution: if userspace ever pushes a mode different from the
+ * expected one (i.e., the one advertised by get_modes), we'll
+ * add margins.
+ */
+
+ ST7789V_TEST(ret, st7789v_write_command(
+ ctx, MIPI_DCS_ENTER_PARTIAL_MODE));
+
+ ST7789V_TEST(ret, st7789v_write_command(
+ ctx, MIPI_DCS_SET_PAGE_ADDRESS));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
+
+ ST7789V_TEST(ret, st7789v_write_command(
+ ctx, MIPI_DCS_SET_PARTIAL_ROWS));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
+ }
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
@@ -305,11 +559,9 @@ static int st7789v_prepare(struct drm_panel *panel)
ST7789V_RAMCTRL_MAGIC));
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
- ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
+ ST7789V_TEST(ret, st7789v_write_data(ctx, mode |
ST7789V_RGBCTRL_RCM(2) |
- ST7789V_RGBCTRL_VSYNC_HIGH |
- ST7789V_RGBCTRL_HSYNC_HIGH |
- ST7789V_RGBCTRL_PCLK_HIGH));
+ polarity));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20)));
@@ -346,41 +598,52 @@ static int st7789v_unprepare(struct drm_panel *panel)
}
static const struct drm_panel_funcs st7789v_drm_funcs = {
- .disable = st7789v_disable,
- .enable = st7789v_enable,
- .get_modes = st7789v_get_modes,
- .prepare = st7789v_prepare,
- .unprepare = st7789v_unprepare,
+ .disable = st7789v_disable,
+ .enable = st7789v_enable,
+ .get_modes = st7789v_get_modes,
+ .get_orientation = st7789v_get_orientation,
+ .prepare = st7789v_prepare,
+ .unprepare = st7789v_unprepare,
};
static int st7789v_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct st7789v *ctx;
int ret;
- ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
- drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+ spi->bits_per_word = 9;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret, "Failed to setup spi\n");
+
+ ctx->info = device_get_match_data(&spi->dev);
+
+ drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
- ctx->power = devm_regulator_get(&spi->dev, "power");
- if (IS_ERR(ctx->power))
- return PTR_ERR(ctx->power);
+ ctx->power = devm_regulator_get(dev, "power");
+ ret = PTR_ERR_OR_ZERO(ctx->power);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
- ctx->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(ctx->reset)) {
- dev_err(&spi->dev, "Couldn't get our reset line\n");
- return PTR_ERR(ctx->reset);
- }
+ ctx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(ctx->reset);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get reset line\n");
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
drm_panel_add(&ctx->panel);
@@ -394,8 +657,21 @@ static void st7789v_remove(struct spi_device *spi)
drm_panel_remove(&ctx->panel);
}
+static const struct spi_device_id st7789v_spi_id[] = {
+ { "st7789v", (unsigned long) &default_panel },
+ { "t28cp45tn89-v17", (unsigned long) &t28cp45tn89_panel },
+ { "et028013dma", (unsigned long) &et028013dma_panel },
+ { "jt240mhqs-hwt-ek-e3", (unsigned long) &jt240mhqs_hwt_ek_e3_panel },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, st7789v_spi_id);
+
static const struct of_device_id st7789v_of_match[] = {
- { .compatible = "sitronix,st7789v" },
+ { .compatible = "sitronix,st7789v", .data = &default_panel },
+ { .compatible = "inanbo,t28cp45tn89-v17", .data = &t28cp45tn89_panel },
+ { .compatible = "edt,et028013dma", .data = &et028013dma_panel },
+ { .compatible = "jasonic,jt240mhqs-hwt-ek-e3",
+ .data = &jt240mhqs_hwt_ek_e3_panel },
{ }
};
MODULE_DEVICE_TABLE(of, st7789v_of_match);
@@ -403,6 +679,7 @@ MODULE_DEVICE_TABLE(of, st7789v_of_match);
static struct spi_driver st7789v_driver = {
.probe = st7789v_probe,
.remove = st7789v_remove,
+ .id_table = st7789v_spi_id,
.driver = {
.name = "st7789v",
.of_match_table = st7789v_of_match,
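
For clarity on the partial-display window programmed in st7789v_prepare() above: the partial_start/partial_end rows from st7789_panel_info are split into big-endian byte pairs, and the same four bytes are sent for both MIPI_DCS_SET_PAGE_ADDRESS and MIPI_DCS_SET_PARTIAL_ROWS, with the end row made inclusive by the "- 1". A minimal stand-alone sketch of that packing, using the jt240mhqs-hwt-ek-e3 values from this patch (partial_start = 38, partial_end = 318); the helper name is illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Same packing as the driver: the end row is inclusive, hence "- 1". */
static void pack_partial_area(uint16_t start, uint16_t end, uint8_t out[4])
{
	out[0] = (start >> 8) & 0xff;
	out[1] = start & 0xff;
	out[2] = ((end - 1) >> 8) & 0xff;
	out[3] = (end - 1) & 0xff;
}

int main(void)
{
	uint8_t area[4];

	pack_partial_area(38, 318, area);	/* jt240mhqs-hwt-ek-e3 window */
	/* Prints "00 26 01 3d": rows 38..317 inclusive. */
	printf("%02x %02x %02x %02x\n", area[0], area[1], area[2], area[3]);
	return 0;
}
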
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 8d8813dbaa45..1bde2f01786b 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -14,7 +14,6 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
new file mode 100644
index 000000000000..6e77a2d71d81
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 InforceComputing
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2023 BayLibre, SAS
+ *
+ * Authors:
+ * - Vinay Simha BN <[email protected]>
+ * - Sumit Semwal <[email protected]>
+ * - Guillaume La Roque <[email protected]>
+ *
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DSI_REG_MCAP 0xB0
+#define DSI_REG_IS 0xB3 /* Interface Setting */
+#define DSI_REG_IIS 0xB4 /* Interface ID Setting */
+#define DSI_REG_CTRL 0xB6
+
+enum {
+ IOVCC = 0,
+ POWER = 1
+};
+
+struct stk_panel {
+ bool prepared;
+ const struct drm_display_mode *mode;
+ struct backlight_device *backlight;
+ struct drm_panel base;
+ struct gpio_desc *enable_gpio; /* Power IC supply enable */
+ struct gpio_desc *reset_gpio; /* External reset */
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+};
+
+static inline struct stk_panel *to_stk_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct stk_panel, base);
+}
+
+static int stk_panel_init(struct stk_panel *stk)
+{
+ struct mipi_dsi_device *dsi = stk->dsi;
+ struct device *dev = &stk->dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to mipi_dsi_dcs_soft_reset: %d\n", ret);
+ return ret;
+ }
+ mdelay(5);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ mipi_dsi_generic_write_seq(dsi, DSI_REG_MCAP, 0x04);
+
+ /* Interface setting, video mode */
+ mipi_dsi_generic_write_seq(dsi, DSI_REG_IS, 0x14, 0x08, 0x00, 0x22, 0x00);
+ mipi_dsi_generic_write_seq(dsi, DSI_REG_IIS, 0x0C, 0x00);
+ mipi_dsi_generic_write_seq(dsi, DSI_REG_CTRL, 0x3A, 0xD3);
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x77);
+ if (ret < 0) {
+ dev_err(dev, "failed to write display brightness: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ MIPI_DCS_WRITE_MEMORY_START);
+
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
+ if (ret < 0) {
+ dev_err(dev, "failed to set pixel format: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0, stk->mode->hdisplay - 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to set column address: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, stk->mode->vdisplay - 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to set page address: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stk_panel_on(struct stk_panel *stk)
+{
+ struct mipi_dsi_device *dsi = stk->dsi;
+ struct device *dev = &stk->dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0)
+ dev_err(dev, "failed to set display on: %d\n", ret);
+
+ mdelay(20);
+
+ return ret;
+}
+
+static void stk_panel_off(struct stk_panel *stk)
+{
+ struct mipi_dsi_device *dsi = stk->dsi;
+ struct device *dev = &stk->dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ dev_err(dev, "failed to set display off: %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ dev_err(dev, "failed to enter sleep mode: %d\n", ret);
+
+ msleep(100);
+}
+
+static int stk_panel_unprepare(struct drm_panel *panel)
+{
+ struct stk_panel *stk = to_stk_panel(panel);
+
+ if (!stk->prepared)
+ return 0;
+
+ stk_panel_off(stk);
+ regulator_bulk_disable(ARRAY_SIZE(stk->supplies), stk->supplies);
+ gpiod_set_value(stk->reset_gpio, 0);
+ gpiod_set_value(stk->enable_gpio, 1);
+
+ stk->prepared = false;
+
+ return 0;
+}
+
+static int stk_panel_prepare(struct drm_panel *panel)
+{
+ struct stk_panel *stk = to_stk_panel(panel);
+ struct device *dev = &stk->dsi->dev;
+ int ret;
+
+ if (stk->prepared)
+ return 0;
+
+ gpiod_set_value(stk->reset_gpio, 0);
+ gpiod_set_value(stk->enable_gpio, 0);
+ ret = regulator_enable(stk->supplies[IOVCC].consumer);
+ if (ret < 0)
+ return ret;
+
+ mdelay(8);
+ ret = regulator_enable(stk->supplies[POWER].consumer);
+ if (ret < 0)
+ goto iovccoff;
+
+ mdelay(20);
+ gpiod_set_value(stk->enable_gpio, 1);
+ mdelay(20);
+ gpiod_set_value(stk->reset_gpio, 1);
+ mdelay(10);
+ ret = stk_panel_init(stk);
+ if (ret < 0) {
+ dev_err(dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ ret = stk_panel_on(stk);
+ if (ret < 0) {
+ dev_err(dev, "failed to set panel on: %d\n", ret);
+ goto poweroff;
+ }
+
+ stk->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(stk->supplies[POWER].consumer);
+iovccoff:
+ regulator_disable(stk->supplies[IOVCC].consumer);
+ gpiod_set_value(stk->reset_gpio, 0);
+ gpiod_set_value(stk->enable_gpio, 0);
+
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 163204,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 144,
+ .hsync_end = 1200 + 144 + 16,
+ .htotal = 1200 + 144 + 16 + 45,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 8,
+ .vsync_end = 1920 + 8 + 4,
+ .vtotal = 1920 + 8 + 4 + 4,
+ .width_mm = 95,
+ .height_mm = 151,
+};
+
+static int stk_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ connector->display_info.width_mm = default_mode.width_mm;
+ connector->display_info.height_mm = default_mode.height_mm;
+ return 1;
+}
+
+static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ int ret;
+ u16 brightness;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ return brightness & 0xff;
+}
+
+static int dsi_dcs_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+ if (ret < 0) {
+ dev_err(dev, "failed to set DSI control: %d\n", ret);
+ return ret;
+ }
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ return 0;
+}
+
+static const struct backlight_ops dsi_bl_ops = {
+ .update_status = dsi_dcs_bl_update_status,
+ .get_brightness = dsi_dcs_bl_get_brightness,
+};
+
+static struct backlight_device *
+drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &dsi_bl_ops, &props);
+}
+
+static const struct drm_panel_funcs stk_panel_funcs = {
+ .unprepare = stk_panel_unprepare,
+ .prepare = stk_panel_prepare,
+ .get_modes = stk_panel_get_modes,
+};
+
+static const struct of_device_id stk_of_match[] = {
+ { .compatible = "startek,kd070fhfid015", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stk_of_match);
+
+static int stk_panel_add(struct stk_panel *stk)
+{
+ struct device *dev = &stk->dsi->dev;
+ int ret;
+
+ stk->mode = &default_mode;
+
+ stk->supplies[IOVCC].supply = "iovcc";
+ stk->supplies[POWER].supply = "power";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(stk->supplies), stk->supplies);
+ if (ret) {
+ dev_err(dev, "regulator_bulk failed\n");
+ return ret;
+ }
+
+ stk->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(stk->reset_gpio)) {
+ ret = PTR_ERR(stk->reset_gpio);
+ dev_err(dev, "cannot get reset-gpios %d\n", ret);
+ return ret;
+ }
+
+ stk->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(stk->enable_gpio)) {
+ ret = PTR_ERR(stk->enable_gpio);
+ dev_err(dev, "cannot get enable-gpio %d\n", ret);
+ return ret;
+ }
+
+ stk->backlight = drm_panel_create_dsi_backlight(stk->dsi);
+ if (IS_ERR(stk->backlight)) {
+ ret = PTR_ERR(stk->backlight);
+ dev_err(dev, "failed to register backlight %d\n", ret);
+ return ret;
+ }
+
+ drm_panel_init(&stk->base, &stk->dsi->dev, &stk_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ drm_panel_add(&stk->base);
+
+ return 0;
+}
+
+static int stk_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct stk_panel *stk;
+ int ret;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = (MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM);
+
+ stk = devm_kzalloc(&dsi->dev, sizeof(*stk), GFP_KERNEL);
+ if (!stk)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, stk);
+
+ stk->dsi = dsi;
+
+ ret = stk_panel_add(stk);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ drm_panel_remove(&stk->base);
+
+ return ret;
+}
+
+static void stk_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct stk_panel *stk = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+ err);
+
+ drm_panel_remove(&stk->base);
+}
+
+static struct mipi_dsi_driver stk_panel_driver = {
+ .driver = {
+ .name = "panel-startek-kd070fhfid015",
+ .of_match_table = stk_of_match,
+ },
+ .probe = stk_panel_probe,
+ .remove = stk_panel_remove,
+};
+module_mipi_dsi_driver(stk_panel_driver);
+
+MODULE_AUTHOR("Guillaume La Roque <[email protected]>");
+MODULE_DESCRIPTION("STARTEK KD070FHFID015");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index b31cffb660a7..4f4009f9fe25 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -7,7 +7,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
new file mode 100644
index 000000000000..00fc28ad3d07
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+
+#include <video/mipi_display.h>
+
+struct visionox_r66451 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[2];
+ bool prepared, enabled;
+};
+
+static inline struct visionox_r66451 *to_visionox_r66451(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_r66451, panel);
+}
+
+static void visionox_r66451_reset(struct visionox_r66451 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 10100);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 10100);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 10100);
+}
+
+static int visionox_r66451_on(struct visionox_r66451 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc2,
+ 0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
+ 0x09, 0x3c);
+ mipi_dsi_dcs_write_seq(dsi, 0xd7,
+ 0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xde,
+ 0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
+ 0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xe8, 0x00, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
+ mipi_dsi_dcs_write_seq(dsi, 0xcf,
+ 0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0xd3,
+ 0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
+ 0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
+ 0x3c, 0x9c);
+ mipi_dsi_dcs_write_seq(dsi, 0xd7,
+ 0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
+ 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
+ mipi_dsi_dcs_write_seq(dsi, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
+ 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
+ mipi_dsi_dcs_write_seq(dsi, 0xdf,
+ 0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x34, 0xb4, 0x00, 0x00, 0x00, 0x39, 0x04, 0x09, 0x34);
+ mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x40);
+ mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x00, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x19);
+ mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x42);
+ mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_column_address(dsi, 0, 1080 - 1);
+ mipi_dsi_dcs_set_page_address(dsi, 0, 2340 - 1);
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int visionox_r66451_off(struct visionox_r66451 *ctx)
+{
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ return 0;
+}
+
+static int visionox_r66451_prepare(struct drm_panel *panel)
+{
+ struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ visionox_r66451_reset(ctx);
+
+ ret = visionox_r66451_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ mipi_dsi_compression_mode(ctx->dsi, true);
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int visionox_r66451_unprepare(struct drm_panel *panel)
+{
+ struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = visionox_r66451_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode visionox_r66451_mode = {
+ .clock = 345830,
+ .hdisplay = 1080,
+ .hsync_start = 1175,
+ .hsync_end = 1176,
+ .htotal = 1216,
+ .vdisplay = 2340,
+ .vsync_start = 2365,
+ .vsync_end = 2366,
+ .vtotal = 2370,
+ .width_mm = 0,
+ .height_mm = 0,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int visionox_r66451_enable(struct drm_panel *panel)
+{
+ struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct drm_dsc_picture_parameter_set pps;
+ int ret;
+
+ if (ctx->enabled)
+ return 0;
+
+ if (!dsi->dsc) {
+ dev_err(&dsi->dev, "DSC not attached to DSI\n");
+ return -ENODEV;
+ }
+
+ drm_dsc_pps_payload_pack(&pps, dsi->dsc);
+ ret = mipi_dsi_picture_parameter_set(dsi, &pps);
+ if (ret) {
+ dev_err(&dsi->dev, "Failed to set PPS\n");
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "Failed on set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int visionox_r66451_disable(struct drm_panel *panel)
+{
+ struct visionox_r66451 *ctx = to_visionox_r66451(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ctx->enabled = false;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int visionox_r66451_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ drm_connector_helper_get_modes_fixed(connector, &visionox_r66451_mode);
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_r66451_funcs = {
+ .prepare = visionox_r66451_prepare,
+ .unprepare = visionox_r66451_unprepare,
+ .get_modes = visionox_r66451_get_modes,
+ .enable = visionox_r66451_enable,
+ .disable = visionox_r66451_disable,
+};
+
+static int visionox_r66451_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+
+ return mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+}
+
+static const struct backlight_ops visionox_r66451_bl_ops = {
+ .update_status = visionox_r66451_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_r66451_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_r66451_bl_ops, &props);
+}
+
+static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_r66451 *ctx;
+ struct drm_dsc_config *dsc;
+ int ret = 0;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
+ if (!dsc)
+ return -ENOMEM;
+
+ /* Set DSC params */
+ dsc->dsc_version_major = 0x1;
+ dsc->dsc_version_minor = 0x2;
+
+ dsc->slice_height = 20;
+ dsc->slice_width = 540;
+ dsc->slice_count = 2;
+ dsc->bits_per_component = 8;
+ dsc->bits_per_pixel = 8 << 4;
+ dsc->block_pred_enable = true;
+
+ dsi->dsc = dsc;
+
+ ctx->supplies[0].supply = "vddio";
+ ctx->supplies[1].supply = "vdd";
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ }
+
+ return ret;
+}
+
+static void visionox_r66451_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_r66451 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_r66451_of_match[] = {
+ {.compatible = "visionox,r66451"},
+ { /*sentinel*/ }
+};
+MODULE_DEVICE_TABLE(of, visionox_r66451_of_match);
+
+static struct mipi_dsi_driver visionox_r66451_driver = {
+ .probe = visionox_r66451_probe,
+ .remove = visionox_r66451_remove,
+ .driver = {
+ .name = "panel-visionox-r66451",
+ .of_match_table = visionox_r66451_of_match,
+ },
+};
+
+module_mipi_dsi_driver(visionox_r66451_driver);
+
+MODULE_AUTHOR("Jessica Zhang <[email protected]>");
+MODULE_DESCRIPTION("Panel driver for the Visionox R66451 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index ec228c269146..c2806e4fd553 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -5,7 +5,7 @@
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index bbada731bbbd..a2ab99698ca8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -4,8 +4,9 @@
/* Copyright 2019 Collabora ltd. */
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/pagemap.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
@@ -407,6 +408,10 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
bo = to_panfrost_bo(gem_obj);
+ ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
+ if (ret)
+ goto out_put_object;
+
mutex_lock(&pfdev->shrinker_lock);
mutex_lock(&bo->mappings.lock);
if (args->madv == PANFROST_MADV_DONTNEED) {
@@ -444,7 +449,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
out_unlock_mappings:
mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
-
+ dma_resv_unlock(bo->base.base.resv);
+out_put_object:
drm_gem_object_put(gem_obj);
return ret;
}
@@ -539,10 +545,7 @@ static const struct drm_driver panfrost_drm_driver = {
.minor = 2,
.gem_create_object = panfrost_gem_create_object,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
};
static int panfrost_probe(struct platform_device *pdev)
@@ -611,7 +614,7 @@ err_out0:
return err;
}
-static int panfrost_remove(struct platform_device *pdev)
+static void panfrost_remove(struct platform_device *pdev)
{
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
struct drm_device *ddev = pfdev->ddev;
@@ -625,7 +628,6 @@ static int panfrost_remove(struct platform_device *pdev)
pm_runtime_set_suspended(pfdev->dev);
drm_dev_put(ddev);
- return 0;
}
/*
@@ -717,7 +719,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver panfrost_driver = {
.probe = panfrost_probe,
- .remove = panfrost_remove,
+ .remove_new = panfrost_remove,
.driver = {
.name = "panfrost",
.pm = pm_ptr(&panfrost_pm_ops),
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index bf0170782f25..6a71a2555f85 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -48,14 +48,14 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
if (!mutex_trylock(&bo->mappings.lock))
return false;
- if (!mutex_trylock(&shmem->pages_lock))
+ if (!dma_resv_trylock(shmem->base.resv))
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge_locked(&bo->base);
+ drm_gem_shmem_purge(&bo->base);
ret = true;
- mutex_unlock(&shmem->pages_lock);
+ dma_resv_unlock(shmem->base.resv);
unlock_mappings:
mutex_unlock(&bo->mappings.lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index dbc597ab46fb..a8b4827dc425 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -720,6 +720,22 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
if (dma_fence_is_signaled(job->done_fence))
return DRM_GPU_SCHED_STAT_NOMINAL;
+ /*
+ * Panfrost IRQ handler may take a long time to process an interrupt
+ * if there is another IRQ handler hogging the processing.
+ * For example, the HDMI encoder driver might be stuck in the IRQ
+ * handler for a significant time in a case of bad cable connection.
+ * In order to catch such cases and not report spurious Panfrost
+ * job timeouts, synchronize the IRQ handler and re-check the fence
+ * status.
+ */
+ synchronize_irq(pfdev->js->irq);
+
+ if (dma_fence_is_signaled(job->done_fence)) {
+ dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+ }
+
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
job_read(pfdev, JS_CONFIG(js)),
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index e961fa27702c..c0123d09f699 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -443,6 +443,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
+ struct drm_gem_object *obj;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
@@ -465,15 +466,16 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
page_offset = addr >> PAGE_SHIFT;
page_offset -= bomapping->mmnode.start;
- mutex_lock(&bo->base.pages_lock);
+ obj = &bo->base.base;
+
+ dma_resv_lock(obj->resv, NULL);
if (!bo->base.pages) {
bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
if (!bo->sgts) {
- mutex_unlock(&bo->base.pages_lock);
ret = -ENOMEM;
- goto err_bo;
+ goto err_unlock;
}
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -481,9 +483,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (!pages) {
kvfree(bo->sgts);
bo->sgts = NULL;
- mutex_unlock(&bo->base.pages_lock);
ret = -ENOMEM;
- goto err_bo;
+ goto err_unlock;
}
bo->base.pages = pages;
bo->base.pages_use_count = 1;
@@ -491,7 +492,6 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
pages = bo->base.pages;
if (pages[page_offset]) {
/* Pages are already mapped, bail out. */
- mutex_unlock(&bo->base.pages_lock);
goto out;
}
}
@@ -502,15 +502,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
- mutex_unlock(&bo->base.pages_lock);
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
goto err_pages;
}
}
- mutex_unlock(&bo->base.pages_lock);
-
sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
@@ -529,6 +526,8 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
out:
+ dma_resv_unlock(obj->resv);
+
panfrost_gem_mapping_put(bomapping);
return 0;
@@ -537,6 +536,8 @@ err_map:
sg_free_table(sgt);
err_pages:
drm_gem_shmem_put_pages(&bo->base);
+err_unlock:
+ dma_resv_unlock(obj->resv);
err_bo:
panfrost_gem_mapping_put(bomapping);
return ret;
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 6afdf260a4e2..b9fe926a49e8 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -53,7 +53,7 @@ pl111_mode_valid(struct drm_simple_display_pipe *pipe,
{
struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
- u32 cpp = priv->variant->fb_bpp / 8;
+ u32 cpp = DIV_ROUND_UP(priv->variant->fb_depth, 8);
u64 bw;
/*
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 2a46b5bd8576..d1fe756444ee 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -114,7 +114,7 @@ struct drm_minor;
* extensions to the control register
* @formats: array of supported pixel formats on this variant
* @nformats: the length of the array of supported pixel formats
- * @fb_bpp: desired bits per pixel on the default framebuffer
+ * @fb_depth: desired depth per pixel on the default framebuffer
*/
struct pl111_variant_data {
const char *name;
@@ -126,7 +126,7 @@ struct pl111_variant_data {
bool st_bitmux_control;
const u32 *formats;
unsigned int nformats;
- unsigned int fb_bpp;
+ unsigned int fb_depth;
};
struct pl111_drm_dev_private {
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 4b2a9e9753f6..ba3b5b5f0cdf 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -224,10 +224,7 @@ static const struct drm_driver pl111_drm_driver = {
.minor = 0,
.patchlevel = 0,
.dumb_create = drm_gem_dma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -308,7 +305,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (ret < 0)
goto dev_put;
- drm_fbdev_dma_setup(drm, priv->variant->fb_bpp);
+ drm_fbdev_dma_setup(drm, priv->variant->fb_depth);
return 0;
@@ -351,7 +348,7 @@ static const struct pl111_variant_data pl110_variant = {
.is_pl110 = true,
.formats = pl110_pixel_formats,
.nformats = ARRAY_SIZE(pl110_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/* RealView, Versatile Express etc use this modern variant */
@@ -376,7 +373,7 @@ static const struct pl111_variant_data pl111_variant = {
.name = "PL111",
.formats = pl111_pixel_formats,
.nformats = ARRAY_SIZE(pl111_pixel_formats),
- .fb_bpp = 32,
+ .fb_depth = 32,
};
static const u32 pl110_nomadik_pixel_formats[] = {
@@ -405,7 +402,7 @@ static const struct pl111_variant_data pl110_nomadik_variant = {
.is_lcdc = true,
.st_bitmux_control = true,
.broken_vblank = true,
- .fb_bpp = 16,
+ .fb_depth = 16,
};
static const struct amba_id pl111_id_table[] = {
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1b436b75fd39..1e4b28d03f4d 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/vexpress.h>
@@ -316,7 +317,7 @@ static const struct pl111_variant_data pl110_integrator = {
.broken_vblank = true,
.formats = pl110_integrator_pixel_formats,
.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/*
@@ -330,7 +331,7 @@ static const struct pl111_variant_data pl110_impd1 = {
.broken_vblank = true,
.formats = pl110_integrator_pixel_formats,
.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 15,
};
/*
@@ -343,7 +344,7 @@ static const struct pl111_variant_data pl110_versatile = {
.external_bgr = true,
.formats = pl110_versatile_pixel_formats,
.nformats = ARRAY_SIZE(pl110_versatile_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/*
@@ -355,7 +356,7 @@ static const struct pl111_variant_data pl111_realview = {
.name = "PL111 RealView",
.formats = pl111_realview_pixel_formats,
.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
};
/*
@@ -367,7 +368,7 @@ static const struct pl111_variant_data pl111_vexpress = {
.name = "PL111 Versatile Express",
.formats = pl111_realview_pixel_formats,
.nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
- .fb_bpp = 16,
+ .fb_depth = 16,
.broken_clockdivider = true,
};
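
The fb_bpp to fb_depth rename above matters most for the IM-PD1 variant, which now advertises a 15-bit colour depth: a 15-bit pixel still occupies two bytes of storage, which is why pl111_mode_valid() switches from plain division to DIV_ROUND_UP when deriving bytes per pixel for the bandwidth check. A stand-alone sketch of that rounding, assuming the kernel's DIV_ROUND_UP semantics:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int depth = 15;	/* pl110_impd1 after this change */

	printf("plain division: %u byte(s) per pixel\n", depth / 8);
	printf("DIV_ROUND_UP:   %u byte(s) per pixel\n", DIV_ROUND_UP(depth, 8));
	return 0;
}
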
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index a3b83f89e061..b30ede1cf62d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -290,8 +290,6 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index ea993d7162e8..307a890fde13 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
u32 domain,
size_t size,
struct qxl_surface *surf,
- struct qxl_bo **qobj,
+ struct drm_gem_object **gobj,
uint32_t *handle);
void qxl_gem_object_free(struct drm_gem_object *gobj);
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index d636ba685451..17df5c7ccf69 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
{
struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj;
+ struct drm_gem_object *gobj;
uint32_t handle;
int r;
struct qxl_surface surf;
@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_CPU,
- args->size, &surf, &qobj,
+ args->size, &surf, &gobj,
&handle);
if (r)
return r;
+ qobj = gem_to_qxl_bo(gobj);
qobj->is_dumb = true;
+ drm_gem_object_put(gobj);
args->pitch = pitch;
args->handle = handle;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a08da0bd9098..fc5e3763c359 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
return 0;
}
+/*
+ * If the caller passed a valid gobj pointer, it is responsible for calling
+ * drm_gem_object_put() when it no longer needs to access the object.
+ *
+ * If gobj is NULL, the allocation reference is dropped internally and only
+ * the handle keeps the object alive.
+ */
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv,
u32 domain,
size_t size,
struct qxl_surface *surf,
- struct qxl_bo **qobj,
+ struct drm_gem_object **gobj,
uint32_t *handle)
{
- struct drm_gem_object *gobj;
int r;
+ struct drm_gem_object *local_gobj;
- BUG_ON(!qobj);
BUG_ON(!handle);
r = qxl_gem_object_create(qdev, size, 0,
domain,
false, false, surf,
- &gobj);
+ &local_gobj);
if (r)
return -ENOMEM;
- r = drm_gem_handle_create(file_priv, gobj, handle);
+ r = drm_gem_handle_create(file_priv, local_gobj, handle);
if (r)
return r;
- /* drop reference from allocate - handle holds it now */
- *qobj = gem_to_qxl_bo(gobj);
- drm_gem_object_put(gobj);
+
+ if (gobj)
+ *gobj = local_gobj;
+ else
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(local_gobj);
+
return 0;
}
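
To make the reference-counting contract described in the comment on qxl_gem_object_create_with_handle() concrete, here is a sketch of the two calling styles; the caller function names are hypothetical and only illustrate the pattern already used by qxl_mode_dumb_create() and the alloc ioctls in this patch:

/* Hypothetical caller that needs the BO: it must drop its reference. */
static int example_create_and_touch(struct qxl_device *qdev,
				    struct drm_file *file_priv,
				    size_t size, uint32_t *handle)
{
	struct drm_gem_object *gobj;
	int r;

	r = qxl_gem_object_create_with_handle(qdev, file_priv,
					      QXL_GEM_DOMAIN_CPU, size,
					      NULL, &gobj, handle);
	if (r)
		return r;

	gem_to_qxl_bo(gobj)->is_dumb = true;	/* use the BO while referenced */
	drm_gem_object_put(gobj);		/* handle keeps it alive now */
	return 0;
}

/* Hypothetical caller that only needs the handle: pass NULL for gobj. */
static int example_create_handle_only(struct qxl_device *qdev,
				      struct drm_file *file_priv,
				      size_t size, uint32_t *handle)
{
	return qxl_gem_object_create_with_handle(qdev, file_priv,
						 QXL_GEM_DOMAIN_VRAM, size,
						 NULL, NULL, handle);
}
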
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 30f58b21372a..dd0f834d881c 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
- struct qxl_bo *qobj;
uint32_t handle;
u32 domain = QXL_GEM_DOMAIN_VRAM;
@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
domain,
qxl_alloc->size,
NULL,
- &qobj, &handle);
+ NULL, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
{
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
- struct qxl_bo *qobj;
int handle;
int ret;
int size, actual_stride;
@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
QXL_GEM_DOMAIN_SURFACE,
size,
&surf,
- &qobj, &handle);
+ NULL, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index fe498c8af1bb..f98356be0af2 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -11,7 +11,7 @@ config DRM_RADEON
select DRM_SUBALLOC_HELPER
select DRM_TTM
select DRM_TTM_HELPER
- select FB_IO_HELPERS if DRM_FBDEV_EMULATION
+ select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select SND_HDA_COMPONENT if SND_HDA_CORE
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index c1bbfbe28bda..ceb6d772ef94 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -68,8 +68,8 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
static uint32_t atom_arg_mask[8] = {
0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
@@ -163,13 +163,9 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
CU8(base + 3));
temp |=
- ((ctx->
- io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
- CU8
- (base
- +
- 1))))
- << CU8(base + 3);
+ ((ctx->io_attr >> CU8(base + 2)) &
+ (0xFFFFFFFF >> (32 - CU8(base + 1)))) <<
+ CU8(base + 3);
base += 4;
break;
case ATOM_IIO_END:
@@ -1156,7 +1152,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1216,7 +1212,7 @@ free:
return ret;
}
-int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
{
int r;
@@ -1237,7 +1233,7 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin
return r;
}
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
int r;
mutex_lock(&ctx->scratch_mutex);
@@ -1359,8 +1355,8 @@ void atom_destroy(struct atom_context *ctx)
}
bool atom_parse_data_header(struct atom_context *ctx, int index,
- uint16_t * size, uint8_t * frev, uint8_t * crev,
- uint16_t * data_start)
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint16_t *data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
@@ -1379,8 +1375,8 @@ bool atom_parse_data_header(struct atom_context *ctx, int index,
return true;
}
-bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
- uint8_t * crev)
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+ uint8_t *crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8ef25ab305ae..b8f4dac68d85 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5517,6 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
u8 frev, crev;
u8 *power_state_offset;
struct ci_ps *ps;
+ int ret;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -5546,11 +5547,15 @@ static int ci_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
- return -EINVAL;
+ if (!rdev->pm.power_state[i].clock_info) {
+ ret = -EINVAL;
+ goto err_free_ps;
+ }
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL)
- return -ENOMEM;
+ if (ps == NULL) {
+ ret = -ENOMEM;
+ goto err_free_ps;
+ }
rdev->pm.dpm.ps[i].ps_priv = ps;
ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
@@ -5590,6 +5595,12 @@ static int ci_parse_power_table(struct radeon_device *rdev)
}
return 0;
+
+err_free_ps:
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++)
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ kfree(rdev->pm.dpm.ps);
+ return ret;
}
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
@@ -5678,25 +5689,26 @@ int ci_dpm_init(struct radeon_device *rdev)
ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = r600_get_platform_caps(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = r600_parse_extended_power_table(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = ci_parse_power_table(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
+ r600_free_extended_power_table(rdev);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 5819737c21c6..5d6b81a6578e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3603,7 +3603,7 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
* @rdev: radeon_device pointer
* @ring: radeon ring buffer object
* @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
+ * @emit_wait: Is this a semaphore wait?
*
* Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
* from running ahead of semaphore waits.
diff --git a/drivers/gpu/drm/radeon/clearstate_si.h b/drivers/gpu/drm/radeon/clearstate_si.h
index 356219c6c7f2..7da8418704fe 100644
--- a/drivers/gpu/drm/radeon/clearstate_si.h
+++ b/drivers/gpu/drm/radeon/clearstate_si.h
@@ -23,8 +23,7 @@
#include "clearstate_defs.h"
-static const u32 si_SECT_CONTEXT_def_1[] =
-{
+static const u32 si_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index fdddbbaecbb7..72a0768df00f 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -557,8 +557,12 @@ static int cypress_populate_mclk_value(struct radeon_device *rdev,
ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
u32 reference_clock = rdev->clock.mpll.reference_freq;
u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
- u32 clk_v = ss.percentage *
+ u32 clk_s, clk_v;
+
+ if (!decoded_ref)
+ return -EINVAL;
+ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+ clk_v = ss.percentage *
(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
mpll_ss1 &= ~CLKV_MASK;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 672d2239293e..3e1c1a392fb7 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2241,8 +2241,12 @@ static int ni_populate_mclk_value(struct radeon_device *rdev,
ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
u32 reference_clock = rdev->clock.mpll.reference_freq;
u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
- u32 clk_v = ss.percentage *
+ u32 clk_s, clk_v;
+
+ if (!decoded_ref)
+ return -EINVAL;
+ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+ clk_v = ss.percentage *
(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
mpll_ss1 &= ~CLKV_MASK;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d4f09ecc3d22..affa9e0309b2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2929,7 +2929,7 @@ static void r100_set_safe_registers(struct radeon_device *rdev)
#if defined(CONFIG_DEBUG_FS)
static int r100_debugfs_rbbm_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t reg, value;
unsigned i;
@@ -2948,7 +2948,7 @@ static int r100_debugfs_rbbm_info_show(struct seq_file *m, void *unused)
static int r100_debugfs_cp_ring_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
@@ -2974,7 +2974,7 @@ static int r100_debugfs_cp_ring_info_show(struct seq_file *m, void *unused)
static int r100_debugfs_cp_csq_fifo_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t csq_stat, csq2_stat, tmp;
unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
unsigned i;
@@ -3022,7 +3022,7 @@ static int r100_debugfs_cp_csq_fifo_show(struct seq_file *m, void *unused)
static int r100_debugfs_mc_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t tmp;
tmp = RREG32(RADEON_CONFIG_MEMSIZE);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7b0cfeaddcec..25201b9a5aae 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -249,7 +249,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
- switch(rdev->num_gb_pipes) {
+ switch (rdev->num_gb_pipes) {
case 2:
gb_tile_config |= R300_PIPE_COUNT_R300;
break;
@@ -589,7 +589,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev)
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t tmp;
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
@@ -638,7 +638,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
- switch(reg) {
+ switch (reg) {
case AVIVO_D1MODE_VLINE_START_END:
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
@@ -1180,7 +1180,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
- switch(pkt->opcode) {
+ switch (pkt->opcode) {
case PACKET3_3D_LOAD_VBPNTR:
r = r100_packet3_load_vbpntr(p, pkt, idx);
if (r)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 7e6320e8c6a0..eae8a6389f5e 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -474,7 +474,7 @@ int r420_init(struct radeon_device *rdev)
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t tmp;
tmp = RREG32(R400_GB_PIPE_SELECT);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index dd78fc499402..a17b95eec65f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2918,7 +2918,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
* @rdev: radeon_device pointer
* @ring: radeon ring buffer object
* @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
+ * @emit_wait: Is this a semaphore wait?
*
* Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
* from running ahead of semaphore waits.
@@ -4345,7 +4345,7 @@ restart_ih:
static int r600_debugfs_mc_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
DREG32_SYS(m, rdev, VM_L2_STATUS);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 5771d1fcb073..603a78e41ba5 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -618,7 +618,7 @@ int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
atcs_input.size = sizeof(struct atcs_pref_req_input);
/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
- atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8);
+ atcs_input.client_id = pci_dev_id(rdev->pdev);
atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
if (advertise)
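
For context on the client_id change above: pci_dev_id() packs the bus number and devfn into the 16-bit PCI requester ID, which is exactly what the removed open-coded expression built. A small sketch of the equivalence (the helper name is illustrative, not from the driver):

#include <linux/pci.h>

/* Returns the PCI requester ID: (bus << 8) | devfn. */
static u16 example_client_id(struct pci_dev *pdev)
{
	/* pci_dev_id(pdev) == PCI_DEVID(pdev->bus->number, pdev->devfn) */
	return pci_dev_id(pdev);
}
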
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
index 35202a453e66..974fbb4ce2c2 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.h
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -453,4 +453,13 @@ struct acpi_bus_event;
* BYTE - number of active lanes
*/
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+bool radeon_has_atpx(void);
+bool radeon_atpx_dgpu_req_power_for_displays(void);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index d124600b5f58..a3d749e350f9 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -130,7 +130,7 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct radeon_agp_head *head = NULL;
+ struct radeon_agp_head *head;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4ad5a328d920..85c4bb186203 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1389,7 +1389,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
- ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+ ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *)
((u8 *)&ss_info->asSS_Info[0]);
for (i = 0; i < num_indices; i++) {
if (ss_assign->ucSS_Id == id) {
@@ -1402,7 +1402,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
ss->refdiv = ss_assign->ucRecommendedRef_Div;
return true;
}
- ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
+ ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *)
((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
}
}
@@ -2105,7 +2105,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
const char *name = thermal_controller_names[power_info->info.
ucOverdriveThermalController];
info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
+ strscpy(info.type, name, sizeof(info.type));
i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
@@ -2355,7 +2355,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
struct i2c_board_info info = { };
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
+ strscpy(info.type, name, sizeof(info.type));
i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
} else {
@@ -3406,7 +3406,7 @@ static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT
{
u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
- u8 *start = (u8*)v2;
+ u8 *start = (u8 *)v2;
while (offset < size) {
ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
@@ -3423,7 +3423,7 @@ static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT
{
u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
- u8 *start = (u8*)v3;
+ u8 *start = (u8 *)v3;
while (offset < size) {
ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
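
On the strlcpy() -> strscpy() conversions in this file: both bound the copy and NUL-terminate, but strscpy() returns the number of characters copied, or -E2BIG on truncation, instead of the full source length, so truncation can be detected without measuring the source string. A hedged sketch (the helper and its error policy are illustrative, not from this patch):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/string.h>

/* Copy a controller name into i2c_board_info.type, rejecting truncation. */
static int example_set_i2c_type(struct i2c_board_info *info, const char *name)
{
	ssize_t ret = strscpy(info->type, name, sizeof(info->type));

	return ret < 0 ? -EINVAL : 0;
}
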
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 6f93f54bf651..595354e3ce0b 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -115,7 +115,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk("failed to evaluate ATPX got %s\n",
+ pr_err("failed to evaluate ATPX got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
@@ -147,7 +147,7 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
}
/**
- * radeon_atpx_validate_functions - validate ATPX functions
+ * radeon_atpx_validate() - validate ATPX functions
*
* @atpx: radeon atpx struct
*
@@ -171,7 +171,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
- printk("ATPX buffer is too small: %zu\n", size);
+ pr_err("ATPX buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
@@ -202,7 +202,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
+ pr_info("ATPX Hybrid Graphics\n");
/*
* Disable legacy PM methods only when pcie port PM is usable,
* otherwise the device might fail to power off or power on.
@@ -239,7 +239,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
- printk("ATPX buffer is too small: %zu\n", size);
+ pr_err("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -248,8 +248,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u, functions 0x%08x\n",
- output.version, output.function_bits);
+ pr_info("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
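
The printk() -> pr_err()/pr_info() changes above attach an explicit log level: pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...), and likewise for pr_info(). A minimal sketch with a hypothetical message prefix (pr_fmt must be defined before printk.h is pulled in):

#define pr_fmt(fmt) "example_atpx: " fmt

#include <linux/printk.h>

static void example_report(int status)
{
	if (status)
		pr_err("ATPX call failed: %d\n", status);	/* KERN_ERR */
	else
		pr_info("ATPX call succeeded\n");		/* KERN_INFO */
}
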
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 783a6b8802d5..2620efc7c675 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -863,7 +863,7 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
struct radeon_device *rdev = dev->dev_private;
uint16_t dac_info;
uint8_t rev, bg, dac;
- struct radeon_encoder_primary_dac *p_dac = NULL;
+ struct radeon_encoder_primary_dac *p_dac;
int found = 0;
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
@@ -1014,7 +1014,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
struct radeon_device *rdev = dev->dev_private;
uint16_t dac_info;
uint8_t rev, bg, dac;
- struct radeon_encoder_tv_dac *tv_dac = NULL;
+ struct radeon_encoder_tv_dac *tv_dac;
int found = 0;
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
@@ -1100,7 +1100,7 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
radeon_device
*rdev)
{
- struct radeon_encoder_lvds *lvds = NULL;
+ struct radeon_encoder_lvds *lvds;
uint32_t fp_vert_stretch, fp_horz_stretch;
uint32_t ppll_div_sel, ppll_val;
uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
@@ -2702,7 +2702,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
struct i2c_board_info info = { };
const char *name = thermal_controller_names[thermal_controller];
info.addr = i2c_addr >> 1;
- strlcpy(info.type, name, sizeof(info.type));
+ strscpy(info.type, name, sizeof(info.type));
i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
@@ -2719,7 +2719,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
struct i2c_board_info info = { };
const char *name = "f75375";
info.addr = 0x28;
- strlcpy(info.type, name, sizeof(info.type));
+ strscpy(info.type, name, sizeof(info.type));
i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
name, info.addr);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 07193cd0c417..d2f02c3dfce2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -198,8 +198,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
}
- }
- else if (bpc > 8) {
+ } else if (bpc > 8) {
/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
connector->name);
@@ -334,10 +333,8 @@ static void radeon_connector_free_edid(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
}
static int radeon_ddc_get_modes(struct drm_connector *connector)
@@ -1372,7 +1369,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* assume digital unless load detected otherwise */
radeon_connector->use_digital = true;
lret = encoder_funcs->detect(encoder, connector);
- DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+ DRM_DEBUG_KMS("load_detect %x returned: %x\n", encoder->encoder_type, lret);
if (lret == connector_status_connected)
radeon_connector->use_digital = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 46a27ebf4588..a6700d7278bf 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -270,7 +270,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i;
+ u64 size;
+ unsigned i;
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e4374814f0ef..fa531493b111 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -113,59 +113,32 @@
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 50
#define KMS_DRIVER_PATCHLEVEL 0
-int radeon_suspend_kms(struct drm_device *dev, bool suspend,
- bool fbcon, bool freeze);
-int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-extern bool radeon_is_px(struct drm_device *dev);
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-bool radeon_has_atpx_dgpu_power_cntl(void);
-bool radeon_is_atpx_hybrid(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-static inline bool radeon_is_atpx_hybrid(void) { return false; }
-#endif
int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
-int radeon_r4xx_atom = 0;
+int radeon_r4xx_atom;
int radeon_agpmode = -1;
-int radeon_vram_limit = 0;
+int radeon_vram_limit;
int radeon_gart_size = -1; /* auto */
-int radeon_benchmarking = 0;
-int radeon_testing = 0;
-int radeon_connector_table = 0;
+int radeon_benchmarking;
+int radeon_testing;
+int radeon_connector_table;
int radeon_tv = 1;
int radeon_audio = -1;
-int radeon_disp_priority = 0;
-int radeon_hw_i2c = 0;
+int radeon_disp_priority;
+int radeon_hw_i2c;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
-int radeon_fastfb = 0;
+int radeon_fastfb;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
-int radeon_hard_reset = 0;
+int radeon_hard_reset;
int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
-int radeon_deep_color = 0;
+int radeon_deep_color;
int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
int radeon_backlight = -1;
@@ -384,6 +357,7 @@ radeon_pci_shutdown(struct pci_dev *pdev)
static int radeon_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+
return radeon_suspend_kms(drm_dev, true, true, false);
}
@@ -404,12 +378,14 @@ static int radeon_pmops_resume(struct device *dev)
static int radeon_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+
return radeon_suspend_kms(drm_dev, false, true, true);
}
static int radeon_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+
return radeon_resume_kms(drm_dev, false, true);
}
@@ -494,6 +470,7 @@ long radeon_drm_ioctl(struct file *filp,
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
long ret;
+
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0) {
@@ -604,10 +581,7 @@ static const struct drm_driver kms_driver = {
.dumb_map_offset = radeon_mode_dumb_mmap,
.fops = &radeon_driver_kms_fops,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 2ffe0975ee54..34a1c73d3938 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -124,4 +124,17 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b3518a8f95a0..9cb6401fe97e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -58,6 +58,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
count = -1;
list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+
count++;
if (clone_encoder == encoder)
@@ -108,9 +109,10 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
else {
- /*if (rdev->family == CHIP_R200)
- ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
- else*/
+ /* if (rdev->family == CHIP_R200)
+ * ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+ * else
+ */
ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
}
break;
@@ -234,6 +236,7 @@ void radeon_encoder_set_active_device(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
radeon_encoder->active_device, radeon_encoder->devices,
@@ -320,12 +323,12 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- unsigned hblank = native_mode->htotal - native_mode->hdisplay;
- unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
- unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
- unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
- unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
- unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+ unsigned int hblank = native_mode->htotal - native_mode->hdisplay;
+ unsigned int vblank = native_mode->vtotal - native_mode->vdisplay;
+ unsigned int hover = native_mode->hsync_start - native_mode->hdisplay;
+ unsigned int vover = native_mode->vsync_start - native_mode->vdisplay;
+ unsigned int hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+ unsigned int vsync_width = native_mode->vsync_end - native_mode->vsync_start;
adjusted_mode->clock = native_mode->clock;
adjusted_mode->flags = native_mode->flags;
@@ -424,6 +427,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index 28212c2d6c98..02bf25759059 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -193,7 +193,7 @@ static const struct fb_ops radeon_fbdev_fb_ops = {
.owner = THIS_MODULE,
.fb_open = radeon_fbdev_fb_open,
.fb_release = radeon_fbdev_fb_release,
- FB_DEFAULT_IO_OPS,
+ FB_DEFAULT_IOMEM_OPS,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_destroy = radeon_fbdev_fb_destroy,
};
@@ -253,7 +253,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper,
}
info->fbops = &radeon_fbdev_fb_ops;
- info->flags = FBINFO_DEFAULT;
+
/* radeon resume is fragile and needs a vt switch to help it along */
info->skip_vt_switch = false;
@@ -304,6 +304,7 @@ static void radeon_fbdev_client_unregister(struct drm_client_dev *client)
if (fb_helper->info) {
vga_switcheroo_client_fb_set(rdev->pdev, NULL);
+ drm_helper_force_disable_all(dev);
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
@@ -382,10 +383,6 @@ void radeon_fbdev_setup(struct radeon_device *rdev)
goto err_drm_client_init;
}
- ret = radeon_fbdev_client_hotplug(&fb_helper->client);
- if (ret)
- drm_dbg_kms(rdev->ddev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&fb_helper->client);
return;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 73e3117420bf..2749dde5838f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -955,7 +955,7 @@ void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
int i, j;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 04109a2a6fd7..4bb242437ff6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -74,9 +74,9 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
ptr = dma_alloc_coherent(&rdev->pdev->dev, rdev->gart.table_size,
&rdev->gart.table_addr, GFP_KERNEL);
- if (ptr == NULL) {
+ if (!ptr)
return -ENOMEM;
- }
+
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
@@ -99,9 +99,9 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
*/
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
- if (rdev->gart.ptr == NULL) {
+ if (!rdev->gart.ptr)
return;
- }
+
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
@@ -133,9 +133,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
0, NULL, NULL, &rdev->gart.robj);
- if (r) {
+ if (r)
return r;
- }
}
return 0;
}
@@ -197,9 +196,9 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
int r;
- if (rdev->gart.robj == NULL) {
+ if (!rdev->gart.robj)
return;
- }
+
r = radeon_bo_reserve(rdev->gart.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.robj);
@@ -220,9 +219,9 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
*/
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
- if (rdev->gart.robj == NULL) {
+ if (!rdev->gart.robj)
return;
- }
+
radeon_bo_unref(&rdev->gart.robj);
}
@@ -239,11 +238,10 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned int offset,
int pages)
{
- unsigned t;
- unsigned p;
+ unsigned int t, p;
int i, j;
if (!rdev->gart.ready) {
@@ -284,12 +282,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+int radeon_gart_bind(struct radeon_device *rdev, unsigned int offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
- unsigned t;
- unsigned p;
+ unsigned int t, p;
uint64_t page_base, page_entry;
int i, j;
@@ -307,9 +304,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
page_entry = radeon_gart_get_page_entry(page_base, flags);
rdev->gart.pages_entry[t] = page_entry;
- if (rdev->gart.ptr) {
+ if (rdev->gart.ptr)
radeon_gart_set_page(rdev, t, page_entry);
- }
+
page_base += RADEON_GPU_PAGE_SIZE;
}
}
@@ -332,9 +329,9 @@ int radeon_gart_init(struct radeon_device *rdev)
{
int r, i;
- if (rdev->gart.pages) {
+ if (rdev->gart.pages)
return 0;
- }
+
/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bdc5af23f005..358d19242f4b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -316,7 +316,7 @@ int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
{
/* TODO: implement */
DRM_ERROR("unimplemented %s\n", __func__);
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
@@ -324,7 +324,7 @@ int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
{
/* TODO: implement */
DRM_ERROR("unimplemented %s\n", __func__);
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
- robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_put(gobj);
up_read(&rdev->exclusive_lock);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -879,7 +877,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
struct radeon_bo *rbo;
unsigned i = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 6a45a72488f9..fb9ecf5dbe2b 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -292,7 +292,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
static int radeon_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 3377fbc71f65..c4dda908666c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -99,6 +99,16 @@ static void radeon_hotplug_work_func(struct work_struct *work)
static void radeon_dp_work_func(struct work_struct *work)
{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ dp_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ mutex_lock(&mode_config->mutex);
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ mutex_unlock(&mode_config->mutex);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e0214cf1b43b..a16590c6247f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -444,7 +444,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
return -EINVAL;
}
- value = (uint32_t*)&value64;
+ value = (uint32_t *)&value64;
value_size = sizeof(uint64_t);
value64 = radeon_get_gpu_clock_counter(rdev);
break;
@@ -543,18 +543,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*value = rdev->vce.fb_version;
break;
case RADEON_INFO_NUM_BYTES_MOVED:
- value = (uint32_t*)&value64;
+ value = (uint32_t *)&value64;
value_size = sizeof(uint64_t);
value64 = atomic64_read(&rdev->num_bytes_moved);
break;
case RADEON_INFO_VRAM_USAGE:
- value = (uint32_t*)&value64;
+ value = (uint32_t *)&value64;
value_size = sizeof(uint64_t);
man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
value64 = ttm_resource_manager_usage(man);
break;
case RADEON_INFO_GTT_USAGE:
- value = (uint32_t*)&value64;
+ value = (uint32_t *)&value64;
value_size = sizeof(uint64_t);
man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
value64 = ttm_resource_manager_usage(man);
@@ -614,7 +614,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
- if (copy_to_user(value_ptr, (char*)value, value_size)) {
+ if (copy_to_user(value_ptr, (char *)value, value_size)) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 601d35d34eab..c4350ac2b3d2 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1692,7 +1692,7 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_int_tmds *tmds = NULL;
+ struct radeon_encoder_int_tmds *tmds;
bool ret;
tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
@@ -1715,7 +1715,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_ext_tmds *tmds = NULL;
+ struct radeon_encoder_ext_tmds *tmds;
bool ret;
if (rdev->is_atom_bios)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 12e180b119ac..7883e9ec0bae 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -724,12 +724,14 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
}
for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
- if ((tv_dac->tv.h_code_timing[i] = hor_timing[i]) == 0)
+ tv_dac->tv.h_code_timing[i] = hor_timing[i];
+ if (tv_dac->tv.h_code_timing[i] == 0)
break;
}
for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
- if ((tv_dac->tv.v_code_timing[i] = vert_timing[i]) == 0)
+ tv_dac->tv.v_code_timing[i] = vert_timing[i];
+ if (tv_dac->tv.v_code_timing[i] == 0)
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index cbc554928bcc..b73fd9ab0252 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1916,7 +1916,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
struct drm_device *ddev = rdev->ddev;
if ((rdev->flags & RADEON_IS_PX) &&
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 7e207276df37..e6534fa9f1fb 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -464,7 +464,7 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
static int radeon_debugfs_ring_info_show(struct seq_file *m, void *unused)
{
- struct radeon_ring *ring = (struct radeon_ring *) m->private;
+ struct radeon_ring *ring = m->private;
struct radeon_device *rdev = ring->rdev;
uint32_t rptr, wptr, rptr_next;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index a5e1d2139e80..c9fef9b61ced 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -156,10 +156,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
i, *vram_start, gtt_start,
(unsigned long long)
(gtt_addr - rdev->mc.gtt_start +
- (void*)gtt_start - gtt_map),
+ (void *)gtt_start - gtt_map),
(unsigned long long)
(vram_addr - rdev->mc.vram_start +
- (void*)gtt_start - gtt_map));
+ (void *)gtt_start - gtt_map));
radeon_bo_kunmap(vram_obj);
goto out_lclean_unpin;
}
@@ -207,10 +207,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
i, *gtt_start, vram_start,
(unsigned long long)
(vram_addr - rdev->mc.vram_start +
- (void*)vram_start - vram_map),
+ (void *)vram_start - vram_map),
(unsigned long long)
(gtt_addr - rdev->mc.gtt_start +
- (void*)vram_start - vram_map));
+ (void *)vram_start - vram_map));
radeon_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 2220cdf6a3f6..4eb83ccc4906 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -36,7 +36,6 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
-#include <linux/swiotlb.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -359,7 +358,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm
struct page **pages = ttm->pages + pinned;
r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
- pages, NULL);
+ pages);
if (r < 0)
goto release_pages;
@@ -780,7 +779,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index ca4a36464340..d1871af967d4 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -95,7 +95,7 @@ int radeon_vce_init(struct radeon_device *rdev)
size = rdev->vce_fw->size - strlen(fw_version) - 9;
c = rdev->vce_fw->data;
- for (;size > 0; --size, ++c)
+ for (; size > 0; --size, ++c)
if (strncmp(c, fw_version, strlen(fw_version)) == 0)
break;
@@ -110,7 +110,7 @@ int radeon_vce_init(struct radeon_device *rdev)
size = rdev->vce_fw->size - strlen(fb_version) - 3;
c = rdev->vce_fw->data;
- for (;size > 0; --size, ++c)
+ for (; size > 0; --size, ++c)
if (strncmp(c, fb_version, strlen(fb_version)) == 0)
break;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 6383f7a34bd8..922a29e58880 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -307,7 +307,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t tmp;
tmp = RREG32(RADEON_HOST_PATH_CNTL);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 63fb06e8e2d7..76260fdfbaa7 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -221,7 +221,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
#if defined(CONFIG_DEBUG_FS)
static int rv515_debugfs_pipes_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t tmp;
tmp = RREG32(GB_PIPE_SELECT);
@@ -237,7 +237,7 @@ static int rv515_debugfs_pipes_info_show(struct seq_file *m, void *unused)
static int rv515_debugfs_ga_info_show(struct seq_file *m, void *unused)
{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
+ struct radeon_device *rdev = m->private;
uint32_t tmp;
tmp = RREG32(0x2140);
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index d57a3e1df8d6..4464fd21a302 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -249,8 +249,12 @@ int rv740_populate_mclk_value(struct radeon_device *rdev,
ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
u32 reference_clock = rdev->clock.mpll.reference_freq;
u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
- u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
- u32 clk_v = 0x40000 * ss.percentage *
+ u32 clk_s, clk_v;
+
+ if (!decoded_ref)
+ return -EINVAL;
+ clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
+ clk_v = 0x40000 * ss.percentage *
(dividers.whole_fb_div + (dividers.frac_fb_div / 8)) / (clk_s * 10000);
mpll_ss1 &= ~CLKV_MASK;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 26fa9b095514..9ce12fa3c356 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -136,8 +136,7 @@ int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return 0;
}
-static const u32 r7xx_golden_registers[] =
-{
+static const u32 r7xx_golden_registers[] = {
0x8d00, 0xffffffff, 0x0e0e0074,
0x8d04, 0xffffffff, 0x013a2b34,
0x9508, 0xffffffff, 0x00000002,
@@ -152,8 +151,7 @@ static const u32 r7xx_golden_registers[] =
0x7300, 0xffffffff, 0x001000f0
};
-static const u32 r7xx_golden_dyn_gpr_registers[] =
-{
+static const u32 r7xx_golden_dyn_gpr_registers[] = {
0x8db0, 0xffffffff, 0x98989898,
0x8db4, 0xffffffff, 0x98989898,
0x8db8, 0xffffffff, 0x98989898,
@@ -165,8 +163,7 @@ static const u32 r7xx_golden_dyn_gpr_registers[] =
0x88c4, 0xffffffff, 0x00000082
};
-static const u32 rv770_golden_registers[] =
-{
+static const u32 rv770_golden_registers[] = {
0x562c, 0xffffffff, 0,
0x3f90, 0xffffffff, 0,
0x9148, 0xffffffff, 0,
@@ -175,8 +172,7 @@ static const u32 rv770_golden_registers[] =
0x9698, 0x18000000, 0x18000000
};
-static const u32 rv770ce_golden_registers[] =
-{
+static const u32 rv770ce_golden_registers[] = {
0x562c, 0xffffffff, 0,
0x3f90, 0xffffffff, 0x00cc0000,
0x9148, 0xffffffff, 0x00cc0000,
@@ -187,8 +183,7 @@ static const u32 rv770ce_golden_registers[] =
0x9698, 0x18000000, 0x18000000
};
-static const u32 rv770_mgcg_init[] =
-{
+static const u32 rv770_mgcg_init[] = {
0x8bcc, 0xffffffff, 0x130300f9,
0x5448, 0xffffffff, 0x100,
0x55e4, 0xffffffff, 0x100,
@@ -345,8 +340,7 @@ static const u32 rv770_mgcg_init[] =
0x92a4, 0xffffffff, 0x00080007
};
-static const u32 rv710_golden_registers[] =
-{
+static const u32 rv710_golden_registers[] = {
0x3f90, 0x00ff0000, 0x00fc0000,
0x9148, 0x00ff0000, 0x00fc0000,
0x3f94, 0x00ff0000, 0x00fc0000,
@@ -355,8 +349,7 @@ static const u32 rv710_golden_registers[] =
0xa180, 0xffffffff, 0x00003f3f
};
-static const u32 rv710_mgcg_init[] =
-{
+static const u32 rv710_mgcg_init[] = {
0x8bcc, 0xffffffff, 0x13030040,
0x5448, 0xffffffff, 0x100,
0x55e4, 0xffffffff, 0x100,
@@ -414,8 +407,7 @@ static const u32 rv710_mgcg_init[] =
0x9150, 0xffffffff, 0x4d940000
};
-static const u32 rv730_golden_registers[] =
-{
+static const u32 rv730_golden_registers[] = {
0x3f90, 0x00ff0000, 0x00f00000,
0x9148, 0x00ff0000, 0x00f00000,
0x3f94, 0x00ff0000, 0x00f00000,
@@ -425,8 +417,7 @@ static const u32 rv730_golden_registers[] =
0xa180, 0xffffffff, 0x00003f3f
};
-static const u32 rv730_mgcg_init[] =
-{
+static const u32 rv730_mgcg_init[] = {
0x8bcc, 0xffffffff, 0x130300f9,
0x5448, 0xffffffff, 0x100,
0x55e4, 0xffffffff, 0x100,
@@ -547,8 +538,7 @@ static const u32 rv730_mgcg_init[] =
0x92a4, 0xffffffff, 0x00000005
};
-static const u32 rv740_golden_registers[] =
-{
+static const u32 rv740_golden_registers[] = {
0x88c4, 0xffffffff, 0x00000082,
0x28a50, 0xfffffffc, 0x00000004,
0x2650, 0x00040000, 0,
@@ -584,8 +574,7 @@ static const u32 rv740_golden_registers[] =
0x9698, 0x18000000, 0x18000000
};
-static const u32 rv740_mgcg_init[] =
-{
+static const u32 rv740_mgcg_init[] = {
0x8bcc, 0xffffffff, 0x13030100,
0x5448, 0xffffffff, 0x100,
0x55e4, 0xffffffff, 0x100,
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index 45575c0d0a1d..09fa7f5e7c41 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -34,8 +34,7 @@
#define FIRST_SMC_INT_VECT_REG 0xFFD8
#define FIRST_INT_VECT_S19 0xFFC0
-static const u8 rv770_smc_int_vectors[] =
-{
+static const u8 rv770_smc_int_vectors[] = {
0x08, 0x10, 0x08, 0x10,
0x08, 0x10, 0x08, 0x10,
0x08, 0x10, 0x08, 0x10,
@@ -54,8 +53,7 @@ static const u8 rv770_smc_int_vectors[] =
0x03, 0x51, 0x03, 0x51
};
-static const u8 rv730_smc_int_vectors[] =
-{
+static const u8 rv730_smc_int_vectors[] = {
0x08, 0x15, 0x08, 0x15,
0x08, 0x15, 0x08, 0x15,
0x08, 0x15, 0x08, 0x15,
@@ -74,8 +72,7 @@ static const u8 rv730_smc_int_vectors[] =
0x03, 0x56, 0x03, 0x56
};
-static const u8 rv710_smc_int_vectors[] =
-{
+static const u8 rv710_smc_int_vectors[] = {
0x08, 0x04, 0x08, 0x04,
0x08, 0x04, 0x08, 0x04,
0x08, 0x04, 0x08, 0x04,
@@ -94,8 +91,7 @@ static const u8 rv710_smc_int_vectors[] =
0x03, 0x51, 0x03, 0x51
};
-static const u8 rv740_smc_int_vectors[] =
-{
+static const u8 rv740_smc_int_vectors[] = {
0x08, 0x10, 0x08, 0x10,
0x08, 0x10, 0x08, 0x10,
0x08, 0x10, 0x08, 0x10,
@@ -114,8 +110,7 @@ static const u8 rv740_smc_int_vectors[] =
0x03, 0x51, 0x03, 0x51
};
-static const u8 cedar_smc_int_vectors[] =
-{
+static const u8 cedar_smc_int_vectors[] = {
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
@@ -134,8 +129,7 @@ static const u8 cedar_smc_int_vectors[] =
0x04, 0xF6, 0x04, 0xF6
};
-static const u8 redwood_smc_int_vectors[] =
-{
+static const u8 redwood_smc_int_vectors[] = {
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
@@ -154,8 +148,7 @@ static const u8 redwood_smc_int_vectors[] =
0x04, 0xF6, 0x04, 0xF6
};
-static const u8 juniper_smc_int_vectors[] =
-{
+static const u8 juniper_smc_int_vectors[] = {
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
@@ -174,8 +167,7 @@ static const u8 juniper_smc_int_vectors[] =
0x04, 0xF6, 0x04, 0xF6
};
-static const u8 cypress_smc_int_vectors[] =
-{
+static const u8 cypress_smc_int_vectors[] = {
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
0x0B, 0x05, 0x0B, 0x05,
@@ -194,8 +186,7 @@ static const u8 cypress_smc_int_vectors[] =
0x04, 0xF6, 0x04, 0xF6
};
-static const u8 barts_smc_int_vectors[] =
-{
+static const u8 barts_smc_int_vectors[] = {
0x0C, 0x14, 0x0C, 0x14,
0x0C, 0x14, 0x0C, 0x14,
0x0C, 0x14, 0x0C, 0x14,
@@ -214,8 +205,7 @@ static const u8 barts_smc_int_vectors[] =
0x05, 0x0A, 0x05, 0x0A
};
-static const u8 turks_smc_int_vectors[] =
-{
+static const u8 turks_smc_int_vectors[] = {
0x0C, 0x14, 0x0C, 0x14,
0x0C, 0x14, 0x0C, 0x14,
0x0C, 0x14, 0x0C, 0x14,
@@ -234,8 +224,7 @@ static const u8 turks_smc_int_vectors[] =
0x05, 0x0A, 0x05, 0x0A
};
-static const u8 caicos_smc_int_vectors[] =
-{
+static const u8 caicos_smc_int_vectors[] = {
0x0C, 0x14, 0x0C, 0x14,
0x0C, 0x14, 0x0C, 0x14,
0x0C, 0x14, 0x0C, 0x14,
@@ -254,8 +243,7 @@ static const u8 caicos_smc_int_vectors[] =
0x05, 0x0A, 0x05, 0x0A
};
-static const u8 cayman_smc_int_vectors[] =
-{
+static const u8 cayman_smc_int_vectors[] = {
0x12, 0x05, 0x12, 0x05,
0x12, 0x05, 0x12, 0x05,
0x12, 0x05, 0x12, 0x05,
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 4ea1cb2e45a3..4b7dee3cf58b 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -89,8 +89,7 @@ struct PP_SIslands_PAPMStatus
};
typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
-struct PP_SIslands_PAPMParameters
-{
+struct PP_SIslands_PAPMParameters {
uint32_t NearTDPLimitTherm;
uint32_t NearTDPLimitPAPM;
uint32_t PlatformPowerLimit;
@@ -100,8 +99,7 @@ struct PP_SIslands_PAPMParameters
};
typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
-struct SISLANDS_SMC_SCLK_VALUE
-{
+struct SISLANDS_SMC_SCLK_VALUE {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -113,8 +111,7 @@ struct SISLANDS_SMC_SCLK_VALUE
typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
-struct SISLANDS_SMC_MCLK_VALUE
-{
+struct SISLANDS_SMC_MCLK_VALUE {
uint32_t vMPLL_FUNC_CNTL;
uint32_t vMPLL_FUNC_CNTL_1;
uint32_t vMPLL_FUNC_CNTL_2;
@@ -129,8 +126,7 @@ struct SISLANDS_SMC_MCLK_VALUE
typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
-struct SISLANDS_SMC_VOLTAGE_VALUE
-{
+struct SISLANDS_SMC_VOLTAGE_VALUE {
uint16_t value;
uint8_t index;
uint8_t phase_settings;
@@ -138,8 +134,7 @@ struct SISLANDS_SMC_VOLTAGE_VALUE
typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
-struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL {
uint8_t ACIndex;
uint8_t displayWatermark;
uint8_t gen2PCIE;
@@ -180,8 +175,7 @@ struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-struct SISLANDS_SMC_SWSTATE
-{
+struct SISLANDS_SMC_SWSTATE {
uint8_t flags;
uint8_t levelCount;
uint8_t padding2;
@@ -205,8 +199,7 @@ struct SISLANDS_SMC_SWSTATE_SINGLE {
#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
-struct SISLANDS_SMC_VOLTAGEMASKTABLE
-{
+struct SISLANDS_SMC_VOLTAGEMASKTABLE {
uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
};
@@ -214,8 +207,7 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
#define SISLANDS_MAX_NO_VREG_STEPS 32
-struct SISLANDS_SMC_STATETABLE
-{
+struct SISLANDS_SMC_STATETABLE {
uint8_t thermalProtectType;
uint8_t systemFlags;
uint8_t maxVDDCIndexInPPTable;
@@ -254,8 +246,7 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
-struct PP_SIslands_FanTable
-{
+struct PP_SIslands_FanTable {
uint8_t fdo_mode;
uint8_t padding;
int16_t temp_min;
@@ -285,8 +276,7 @@ typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
#define SMC_SISLANDS_SCALE_I 7
#define SMC_SISLANDS_SCALE_R 12
-struct PP_SIslands_CacConfig
-{
+struct PP_SIslands_CacConfig {
uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
uint32_t lkge_lut_V0;
uint32_t lkge_lut_Vstep;
@@ -308,23 +298,20 @@ typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
-struct SMC_SIslands_MCRegisterAddress
-{
+struct SMC_SIslands_MCRegisterAddress {
uint16_t s0;
uint16_t s1;
};
typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
-struct SMC_SIslands_MCRegisterSet
-{
+struct SMC_SIslands_MCRegisterSet {
uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
};
typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
-struct SMC_SIslands_MCRegisters
-{
+struct SMC_SIslands_MCRegisters {
uint8_t last;
uint8_t reserved[3];
SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
@@ -333,8 +320,7 @@ struct SMC_SIslands_MCRegisters
typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
-struct SMC_SIslands_MCArbDramTimingRegisterSet
-{
+struct SMC_SIslands_MCArbDramTimingRegisterSet {
uint32_t mc_arb_dram_timing;
uint32_t mc_arb_dram_timing2;
uint8_t mc_arb_rfsh_rate;
@@ -344,8 +330,7 @@ struct SMC_SIslands_MCArbDramTimingRegisterSet
typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
-struct SMC_SIslands_MCArbDramTimingRegisters
-{
+struct SMC_SIslands_MCArbDramTimingRegisters {
uint8_t arb_current;
uint8_t reserved[3];
SMC_SIslands_MCArbDramTimingRegisterSet data[16];
@@ -353,8 +338,7 @@ struct SMC_SIslands_MCArbDramTimingRegisters
typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
-struct SMC_SISLANDS_SPLL_DIV_TABLE
-{
+struct SMC_SISLANDS_SPLL_DIV_TABLE {
uint32_t freq[256];
uint32_t ss[256];
};
@@ -374,8 +358,7 @@ typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
-struct Smc_SIslands_DTE_Configuration
-{
+struct Smc_SIslands_DTE_Configuration {
uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
uint32_t K;
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
index e2a67dda4658..26a2f5ad8ee5 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
@@ -187,11 +187,9 @@ static int rcar_cmm_probe(struct platform_device *pdev)
return 0;
}
-static int rcar_cmm_remove(struct platform_device *pdev)
+static void rcar_cmm_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id rcar_cmm_of_table[] = {
@@ -203,7 +201,7 @@ MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
static struct platform_driver rcar_cmm_platform_driver = {
.probe = rcar_cmm_probe,
- .remove = rcar_cmm_remove,
+ .remove_new = rcar_cmm_remove,
.driver = {
.name = "rcar-cmm",
.of_match_table = rcar_cmm_of_table,
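
The .remove -> .remove_new switch here and in the drivers below moves to the platform bus's void-returning remove callback; an error return from remove() could not stop the unbind anyway, so the int return only invited dead "return 0" lines. A skeletal sketch of the new-style driver (names hypothetical, not from this patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Nothing useful can be done with a failure here, hence void. */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
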
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
index 1ffde19cb87f..dee530e4c8b2 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
@@ -12,7 +12,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -605,10 +605,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = rcar_du_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -642,7 +639,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(rcar_du_pm_ops,
* Platform driver
*/
-static int rcar_du_remove(struct platform_device *pdev)
+static void rcar_du_remove(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
struct drm_device *ddev = &rcdu->ddev;
@@ -651,8 +648,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_atomic_helper_shutdown(ddev);
drm_kms_helper_poll_fini(ddev);
-
- return 0;
}
static void rcar_du_shutdown(struct platform_device *pdev)
@@ -701,6 +696,10 @@ static int rcar_du_probe(struct platform_device *pdev)
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
+ /*
+ * Don't use dev_err_probe(), as it would overwrite the probe
+ * deferral reason recorded in rcar_du_modeset_init().
+ */
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to initialize DRM/KMS (%d)\n", ret);
@@ -715,7 +714,7 @@ static int rcar_du_probe(struct platform_device *pdev)
if (ret)
goto error;
- DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+ drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
drm_fbdev_generic_setup(&rcdu->ddev, 32);
@@ -728,7 +727,7 @@ error:
static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
- .remove = rcar_du_remove,
+ .remove_new = rcar_du_remove,
.shutdown = rcar_du_shutdown,
.driver = {
.name = "rcar-du",
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index adfb36b0e815..70d8ad065bfa 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -20,8 +20,10 @@
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/wait.h>
#include "rcar_du_crtc.h"
@@ -933,7 +935,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
/* Initialize the Color Management Modules. */
ret = rcar_du_cmm_init(rcdu);
if (ret)
- return ret;
+ return dev_err_probe(rcdu->dev, ret,
+ "failed to initialize CMM\n");
/* Create the CRTCs. */
for (swindex = 0, hwindex = 0; swindex < rcdu->num_crtcs; ++hwindex) {
@@ -953,7 +956,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
/* Initialize the encoders. */
ret = rcar_du_encoders_init(rcdu);
if (ret < 0)
- return ret;
+ return dev_err_probe(rcdu->dev, ret,
+ "failed to initialize encoders\n");
if (ret == 0) {
dev_err(rcdu->dev, "error: no encoder could be initialized\n");
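
dev_err_probe(), adopted above for the CMM and encoder init failures, logs real errors, stays quiet for -EPROBE_DEFER while recording the deferral reason, and returns the error code it was given, so it can sit directly in a return statement. A hedged sketch with a hypothetical init step standing in for the real ones:

#include <linux/device.h>
#include <linux/platform_device.h>

static int example_component_init(struct device *dev)
{
	return 0;	/* stand-in; a real driver would acquire resources here */
}

static int example_probe(struct platform_device *pdev)
{
	int ret = example_component_init(&pdev->dev);

	if (ret)
		/* Logs for real errors, records the reason for -EPROBE_DEFER,
		 * and passes ret straight back to the caller. */
		return dev_err_probe(&pdev->dev, ret, "component init failed\n");

	return 0;
}
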
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
index d759e0192181..e445fac8e0b4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
@@ -600,7 +600,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
if (!state->crtc) {
/*
* The visible field is not reset by the DRM core but only
- * updated by drm_plane_helper_check_state(), set it manually.
+ * updated by drm_atomic_helper_check_plane_state(), set it
+ * manually.
*/
state->visible = false;
*format = NULL;
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
index 45c05d0ffc70..7aa0373563a4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_vsp.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -122,6 +123,8 @@ static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_UYVY,
@@ -154,6 +157,8 @@ static const u32 rcar_du_vsp_formats_gen4[] = {
DRM_FORMAT_RGB888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_RGBX1010102,
@@ -176,6 +181,41 @@ static const u32 rcar_du_vsp_formats_gen4[] = {
DRM_FORMAT_Y212,
};
+static u32 rcar_du_vsp_state_get_format(struct rcar_du_vsp_plane_state *state)
+{
+ u32 fourcc = state->format->fourcc;
+
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+
+ case DRM_FORMAT_ABGR8888:
+ fourcc = DRM_FORMAT_XBGR8888;
+ break;
+
+ case DRM_FORMAT_BGRA8888:
+ fourcc = DRM_FORMAT_BGRX8888;
+ break;
+
+ case DRM_FORMAT_RGBA1010102:
+ fourcc = DRM_FORMAT_RGBX1010102;
+ break;
+ }
+ }
+
+ return fourcc;
+}
+
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
@@ -189,7 +229,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
.alpha = state->state.alpha >> 8,
.zpos = state->state.zpos,
};
- u32 fourcc = state->format->fourcc;
+ u32 fourcc = rcar_du_vsp_state_get_format(state);
unsigned int i;
cfg.src.left = state->state.src.x1 >> 16;
@@ -206,22 +246,6 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- fourcc = DRM_FORMAT_XRGB1555;
- break;
-
- case DRM_FORMAT_ARGB4444:
- fourcc = DRM_FORMAT_XRGB4444;
- break;
-
- case DRM_FORMAT_ARGB8888:
- fourcc = DRM_FORMAT_XRGB8888;
- break;
- }
- }
-
format = rcar_du_format_info(fourcc);
cfg.pixelformat = format->v4l2;
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
index 18ed14911b98..119d69d20b23 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
@@ -93,13 +93,11 @@ static int rcar_dw_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int rcar_dw_hdmi_remove(struct platform_device *pdev)
+static void rcar_dw_hdmi_remove(struct platform_device *pdev)
{
struct dw_hdmi *hdmi = platform_get_drvdata(pdev);
dw_hdmi_remove(hdmi);
-
- return 0;
}
static const struct of_device_id rcar_dw_hdmi_of_table[] = {
@@ -110,7 +108,7 @@ MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table);
static struct platform_driver rcar_dw_hdmi_platform_driver = {
.probe = rcar_dw_hdmi_probe,
- .remove = rcar_dw_hdmi_remove,
+ .remove_new = rcar_dw_hdmi_remove,
.driver = {
.name = "rcar-dw-hdmi",
.of_match_table = rcar_dw_hdmi_of_table,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index ca215b588fd7..92ba43a6fe38 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -918,15 +918,13 @@ static int rcar_lvds_probe(struct platform_device *pdev)
return 0;
}
-static int rcar_lvds_remove(struct platform_device *pdev)
+static void rcar_lvds_remove(struct platform_device *pdev)
{
struct rcar_lvds *lvds = platform_get_drvdata(pdev);
drm_bridge_remove(&lvds->bridge);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
@@ -1020,7 +1018,7 @@ static const struct dev_pm_ops rcar_lvds_pm_ops = {
static struct platform_driver rcar_lvds_platform_driver = {
.probe = rcar_lvds_probe,
- .remove = rcar_lvds_remove,
+ .remove_new = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
.pm = &rcar_lvds_pm_ops,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index e10e4d4b89a2..2dba7c5ffd2c 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -12,7 +12,6 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -1002,7 +1001,6 @@ static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi)
static int rcar_mipi_dsi_probe(struct platform_device *pdev)
{
struct rcar_mipi_dsi *dsi;
- struct resource *mem;
int ret;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1019,8 +1017,7 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
return ret;
/* Acquire resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->mmio = devm_ioremap_resource(dsi->dev, mem);
+ dsi->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->mmio))
return PTR_ERR(dsi->mmio);
@@ -1044,13 +1041,11 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
return 0;
}
-static int rcar_mipi_dsi_remove(struct platform_device *pdev)
+static void rcar_mipi_dsi_remove(struct platform_device *pdev)
{
struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->host);
-
- return 0;
}
static const struct rcar_mipi_dsi_device_info v3u_data = {
@@ -1093,7 +1088,7 @@ MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table);
static struct platform_driver rcar_mipi_dsi_platform_driver = {
.probe = rcar_mipi_dsi_probe,
- .remove = rcar_mipi_dsi_remove,
+ .remove_new = rcar_mipi_dsi_remove,
.driver = {
.name = "rcar-mipi-dsi",
.of_match_table = rcar_mipi_dsi_of_table,
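Besides the .remove_new conversion, the rcar_mipi_dsi hunks above drop the now-unneeded <linux/of_device.h> include (part of the wider effort to make drivers include only the OF headers they actually use) and collapse the platform_get_resource() + devm_ioremap_resource() pair into a single helper call. A sketch of the idiom, assuming a probe function with struct platform_device *pdev in scope:

/* devm_platform_ioremap_resource() looks up MEM resource 'index' and
 * ioremaps it with device-managed lifetime in one call.
 */
void __iomem *regs = devm_platform_ioremap_resource(pdev, 0);

if (IS_ERR(regs))
	return PTR_ERR(regs);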
diff --git a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
index aa95b85a2964..10febea473cd 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rzg2l_mipi_dsi.c
@@ -10,7 +10,6 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -782,14 +781,12 @@ err_pm_disable:
return ret;
}
-static int rzg2l_mipi_dsi_remove(struct platform_device *pdev)
+static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
{
struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
@@ -801,7 +798,7 @@ MODULE_DEVICE_TABLE(of, rzg2l_mipi_dsi_of_table);
static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
.probe = rzg2l_mipi_dsi_probe,
- .remove = rzg2l_mipi_dsi_remove,
+ .remove_new = rzg2l_mipi_dsi_remove,
.driver = {
.name = "rzg2l-mipi-dsi",
.pm = &rzg2l_mipi_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ad2d3ae7e621..84aa811ca1e9 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -10,8 +10,9 @@
#include <linux/component.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -419,14 +420,12 @@ err_dp_remove:
return ret;
}
-static int rockchip_dp_remove(struct platform_device *pdev)
+static void rockchip_dp_remove(struct platform_device *pdev)
{
struct rockchip_dp_device *dp = platform_get_drvdata(pdev);
component_del(&pdev->dev, &rockchip_dp_component_ops);
analogix_dp_remove(dp->adp);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -481,7 +480,7 @@ MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
struct platform_driver rockchip_dp_driver = {
.probe = rockchip_dp_probe,
- .remove = rockchip_dp_remove,
+ .remove_new = rockchip_dp_remove,
.driver = {
.name = "rockchip-dp",
.pm = &rockchip_dp_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index b6afe3786b74..a29fbafce393 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1222,15 +1222,13 @@ static int cdn_dp_probe(struct platform_device *pdev)
return component_add(dev, &cdn_dp_component_ops);
}
-static int cdn_dp_remove(struct platform_device *pdev)
+static void cdn_dp_remove(struct platform_device *pdev)
{
struct cdn_dp_device *dp = platform_get_drvdata(pdev);
platform_device_unregister(dp->audio_pdev);
cdn_dp_suspend(dp->dev);
component_del(&pdev->dev, &cdn_dp_component_ops);
-
- return 0;
}
static void cdn_dp_shutdown(struct platform_device *pdev)
@@ -1247,7 +1245,7 @@ static const struct dev_pm_ops cdn_dp_pm_ops = {
struct platform_driver cdn_dp_driver = {
.probe = cdn_dp_probe,
- .remove = cdn_dp_remove,
+ .remove_new = cdn_dp_remove,
.shutdown = cdn_dp_shutdown,
.driver = {
.name = "cdn-dp",
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 917e79951aac..0100162a73b2 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -12,7 +12,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -1463,13 +1465,11 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
return 0;
}
-static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
+static void dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(dsi->dmd);
-
- return 0;
}
static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
@@ -1671,7 +1671,7 @@ MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
struct platform_driver dw_mipi_dsi_rockchip_driver = {
.probe = dw_mipi_dsi_rockchip_probe,
- .remove = dw_mipi_dsi_rockchip_remove,
+ .remove_new = dw_mipi_dsi_rockchip_remove,
.driver = {
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
.pm = &dw_mipi_dsi_rockchip_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 112699949db9..341550199111 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -684,11 +684,9 @@ static int dw_hdmi_rockchip_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &dw_hdmi_rockchip_ops);
}
-static int dw_hdmi_rockchip_remove(struct platform_device *pdev)
+static void dw_hdmi_rockchip_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dw_hdmi_rockchip_ops);
-
- return 0;
}
static int __maybe_unused dw_hdmi_rockchip_resume(struct device *dev)
@@ -706,7 +704,7 @@ static const struct dev_pm_ops dw_hdmi_rockchip_pm = {
struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
.probe = dw_hdmi_rockchip_probe,
- .remove = dw_hdmi_rockchip_remove,
+ .remove_new = dw_hdmi_rockchip_remove,
.driver = {
.name = "dwhdmi-rockchip",
.pm = &dw_hdmi_rockchip_pm,
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f51774866f41..6e5b922a121e 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -11,9 +11,10 @@
#include <linux/err.h>
#include <linux/hdmi.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
@@ -797,7 +798,7 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
adap->algo = &inno_hdmi_algorithm;
- strlcpy(adap->name, "Inno HDMI", sizeof(adap->name));
+ strscpy(adap->name, "Inno HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
ret = i2c_add_adapter(adap);
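The strlcpy() to strscpy() swaps here (and in rk3066_hdmi.c and sun4i_hdmi_i2c.c below) are part of the kernel-wide move away from strlcpy(): strlcpy() always traverses the entire source string to compute its return value, whereas strscpy() stops at the destination size and reports truncation directly. Behaviour is unchanged for these fixed adapter-name strings. A small usage sketch with illustrative names:

#include <linux/string.h>

static int copy_adapter_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_size);

	if (ret == -E2BIG)
		return -EINVAL;	/* source did not fit and was truncated */

	return 0;
}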
@@ -919,11 +920,9 @@ static int inno_hdmi_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &inno_hdmi_ops);
}
-static int inno_hdmi_remove(struct platform_device *pdev)
+static void inno_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &inno_hdmi_ops);
-
- return 0;
}
static const struct of_device_id inno_hdmi_dt_ids[] = {
@@ -935,7 +934,7 @@ MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids);
struct platform_driver inno_hdmi_driver = {
.probe = inno_hdmi_probe,
- .remove = inno_hdmi_remove,
+ .remove_new = inno_hdmi_remove,
.driver = {
.name = "innohdmi-rockchip",
.of_match_table = inno_hdmi_dt_ids,
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 90145ad96984..fa6e592e0276 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -730,7 +730,7 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
adap->algo = &rk3066_hdmi_algorithm;
- strlcpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
+ strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
ret = i2c_add_adapter(adap);
@@ -858,11 +858,9 @@ static int rk3066_hdmi_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &rk3066_hdmi_ops);
}
-static int rk3066_hdmi_remove(struct platform_device *pdev)
+static void rk3066_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &rk3066_hdmi_ops);
-
- return 0;
}
static const struct of_device_id rk3066_hdmi_dt_ids[] = {
@@ -873,7 +871,7 @@ MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
struct platform_driver rk3066_hdmi_driver = {
.probe = rk3066_hdmi_probe,
- .remove = rk3066_hdmi_remove,
+ .remove_new = rk3066_hdmi_remove,
.driver = {
.name = "rockchip-rk3066-hdmi",
.of_match_table = rk3066_hdmi_dt_ids,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index d97f2edc646b..ab55d7132550 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -7,6 +7,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
@@ -224,10 +225,7 @@ DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops);
static const struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.dumb_create = rockchip_gem_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
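Dropping .prime_handle_to_fd, .prime_fd_to_handle and .gem_prime_mmap from rockchip's drm_driver (the tegra driver below gets the same treatment) follows the DRM core change that makes the generic drm_gem_prime_* helpers the default behaviour when these hooks are left unset, so assigning them explicitly had become redundant. Illustrative shape of the trimmed driver struct (fields beyond those shown are omitted):

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	/* PRIME export/import and mmap now use the core defaults */
	.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
	.fops = &rockchip_drm_driver_fops,
	.name = DRIVER_NAME,
};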
@@ -450,13 +448,11 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
return 0;
}
-static int rockchip_drm_platform_remove(struct platform_device *pdev)
+static void rockchip_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &rockchip_drm_ops);
rockchip_drm_match_remove(&pdev->dev);
-
- return 0;
}
static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
@@ -475,7 +471,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
- .remove = rockchip_drm_platform_remove,
+ .remove_new = rockchip_drm_platform_remove,
.shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a530ecc4d207..14320bc73e5b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -12,7 +12,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -833,12 +832,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* need align with 2 pixel.
*/
if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
- DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
+ DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
- DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
+ DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
return -EINVAL;
}
@@ -846,7 +845,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct vop *vop = to_vop(crtc);
if (!vop->data->afbc) {
- DRM_ERROR("vop does not support AFBC\n");
+ DRM_DEBUG_KMS("vop does not support AFBC\n");
return -EINVAL;
}
@@ -855,15 +854,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
return ret;
if (new_plane_state->src.x1 || new_plane_state->src.y1) {
- DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
- new_plane_state->src.x1,
- new_plane_state->src.y1, fb->offsets[0]);
+ DRM_DEBUG_KMS("AFBC does not support offset display, " \
+ "xpos=%d, ypos=%d, offset=%d\n",
+ new_plane_state->src.x1, new_plane_state->src.y1,
+ fb->offsets[0]);
return -EINVAL;
}
if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
- DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
- new_plane_state->rotation);
+ DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
+ new_plane_state->rotation);
return -EINVAL;
}
}
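The vop_plane_atomic_check() changes demote log messages from DRM_ERROR to DRM_DEBUG_KMS: these rejections are triggered by ordinary (or malicious) userspace handing in an unsupported configuration, so they should not be able to flood the kernel log at error level; the -EINVAL return is what actually informs userspace. A minimal sketch of the convention, with an illustrative check rather than the driver's real one:

static int example_plane_atomic_check(struct drm_plane_state *state)
{
	if (!state->fb)
		return 0;

	/* Userspace-triggerable constraint: log at debug level only. */
	if (state->fb->format->is_yuv && (state->src.x1 >> 16) % 2) {
		DRM_DEBUG_KMS("YUV planes require an even x position\n");
		return -EINVAL;
	}

	return 0;
}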
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index ca73b8ccc29f..583df4d22f7e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -13,7 +13,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 68f6ebb33460..582859387792 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -739,19 +739,17 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
return ret;
}
-static int rockchip_lvds_remove(struct platform_device *pdev)
+static void rockchip_lvds_remove(struct platform_device *pdev)
{
struct rockchip_lvds *lvds = platform_get_drvdata(pdev);
component_del(&pdev->dev, &rockchip_lvds_component_ops);
clk_unprepare(lvds->pclk);
-
- return 0;
}
struct platform_driver rockchip_lvds_driver = {
.probe = rockchip_lvds_probe,
- .remove = rockchip_lvds_remove,
+ .remove_new = rockchip_lvds_remove,
.driver = {
.name = "rockchip-lvds",
.of_match_table = of_match_ptr(rockchip_lvds_dt_ids),
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 9d30aa73b542..62b573f282a7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -264,16 +264,14 @@ static int vop2_probe(struct platform_device *pdev)
return component_add(dev, &vop2_component_ops);
}
-static int vop2_remove(struct platform_device *pdev)
+static void vop2_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vop2_component_ops);
-
- return 0;
}
struct platform_driver vop2_platform_driver = {
.probe = vop2_probe,
- .remove = vop2_remove,
+ .remove_new = vop2_remove,
.driver = {
.name = "rockchip-vop2",
.of_match_table = of_match_ptr(vop2_dt_match),
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 20ac7811c5eb..7b2805006776 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1163,16 +1163,14 @@ static int vop_probe(struct platform_device *pdev)
return component_add(dev, &vop_component_ops);
}
-static int vop_remove(struct platform_device *pdev)
+static void vop_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vop_component_ops);
-
- return 0;
}
struct platform_driver vop_platform_driver = {
.probe = vop_probe,
- .remove = vop_remove,
+ .remove_new = vop_remove,
.driver = {
.name = "rockchip-vop",
.of_match_table = vop_driver_dt_match,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 68e807ae136a..a42763e1429d 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -176,16 +176,32 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
{
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
- int r;
+ unsigned long index;
dma_fence_put(f);
/* Wait for all dependencies to avoid data corruptions */
- while (!xa_empty(&job->dependencies)) {
- f = xa_erase(&job->dependencies, job->last_dependency++);
- r = dma_fence_add_callback(f, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb);
- if (!r)
+ xa_for_each(&job->dependencies, index, f) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+ if (s_fence && f == &s_fence->scheduled) {
+ /* The dependencies array had a reference on the scheduled
+ * fence, and the finished fence refcount might have
+ * dropped to zero. Use dma_fence_get_rcu() so we get
+ * a NULL fence in that case.
+ */
+ f = dma_fence_get_rcu(&s_fence->finished);
+
+ /* Now that we have a reference on the finished fence,
+ * we can release the reference the dependencies array
+ * had on the scheduled fence.
+ */
+ dma_fence_put(&s_fence->scheduled);
+ }
+
+ xa_erase(&job->dependencies, index);
+ if (f && !dma_fence_add_callback(f, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb))
return;
dma_fence_put(f);
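The reworked drm_sched_entity_kill_jobs_cb() walks every remaining dependency with xa_for_each() instead of popping entries by counter, and, per the comments in the hunk, upgrades a reference on a dependency's scheduled fence to the corresponding finished fence (dma_fence_get_rcu() may legitimately return NULL there if the finished fence is already gone). Stripped of the fence-specific handling, the underlying xarray idiom is "iterate, erase the current slot, stop early if a callback takes over":

#include <linux/xarray.h>

/* Generic sketch of the drain loop; the payload handling is illustrative. */
static void drain_xarray(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry) {
		xa_erase(xa, index);	/* safe while iterating */
		/* consume or release 'entry' here */
	}
}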
@@ -384,7 +400,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
}
s_fence = to_drm_sched_fence(fence);
- if (s_fence && s_fence->sched == sched &&
+ if (!fence->error && s_fence && s_fence->sched == sched &&
!test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
/*
@@ -415,8 +431,17 @@ static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
struct drm_sched_entity *entity)
{
- if (!xa_empty(&job->dependencies))
- return xa_erase(&job->dependencies, job->last_dependency++);
+ struct dma_fence *f;
+
+ /* We keep the fence around, so we can iterate over all dependencies
+ * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+ * before killing the job.
+ */
+ f = xa_load(&job->dependencies, job->last_dependency);
+ if (f) {
+ job->last_dependency++;
+ return dma_fence_get(f);
+ }
if (job->sched->ops->prepare_job)
return job->sched->ops->prepare_job(job, entity);
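drm_sched_job_dependency() is adjusted to match: it now peeks at the next dependency with xa_load() and takes an extra reference instead of erasing the slot, precisely so the entries remain available for the kill-jobs walk above. A condensed sketch of the peek-and-reference step, with generic names:

static struct dma_fence *peek_dependency(struct xarray *deps,
					 unsigned long idx)
{
	struct dma_fence *f = xa_load(deps, idx);

	return f ? dma_fence_get(f) : NULL;	/* caller owns the new reference */
}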
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index ef120475e7c6..06cedfe4b486 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -48,8 +48,32 @@ static void __exit drm_sched_fence_slab_fini(void)
kmem_cache_destroy(sched_fence_slab);
}
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
+ struct dma_fence *fence)
{
+ /*
+ * smp_store_release() to ensure another thread racing us
+ * in drm_sched_fence_set_deadline_finished() sees the
+ * fence's parent set before test_bit()
+ */
+ smp_store_release(&s_fence->parent, dma_fence_get(fence));
+ if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
+ &s_fence->finished.flags))
+ dma_fence_set_deadline(fence, s_fence->deadline);
+}
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+ struct dma_fence *parent)
+{
+ /* Set the parent before signaling the scheduled fence, such that,
+ * any waiter expecting the parent to be filled after the job has
+ * been scheduled (which is the case for drivers delegating waits
+ * to some firmware) doesn't have to busy wait for parent to show
+ * up.
+ */
+ if (!IS_ERR_OR_NULL(parent))
+ drm_sched_fence_set_parent(fence, parent);
+
dma_fence_signal(&fence->scheduled);
}
@@ -181,20 +205,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
- struct dma_fence *fence)
-{
- /*
- * smp_store_release() to ensure another thread racing us
- * in drm_sched_fence_set_deadline_finished() sees the
- * fence's parent set before test_bit()
- */
- smp_store_release(&s_fence->parent, dma_fence_get(fence));
- if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
- &s_fence->finished.flags))
- dma_fence_set_deadline(fence, s_fence->deadline);
-}
-
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
void *owner)
{
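drm_sched_fence_scheduled() now receives the hardware ("parent") fence and, via the moved drm_sched_fence_set_parent() helper, publishes it before signalling the scheduled fence, so a waiter that expects parent to be valid once the job is scheduled never has to busy-wait. The smp_store_release() in the helper pairs with an acquire load on the deadline path, as the comment describes. The general publish pattern looks like this, with purely illustrative names:

/* producer: publish the pointer, then check whether a consumer asked for it */
smp_store_release(&obj->ptr, new_ptr);
if (test_bit(FLAG_WANT_PTR, &obj->flags))
	use_ptr(new_ptr);

/* consumer: announce interest, then acquire-load the pointer */
set_bit(FLAG_WANT_PTR, &obj->flags);
ptr = smp_load_acquire(&obj->ptr);
if (ptr)
	use_ptr(ptr);

Between the two orderings at least one side is guaranteed to observe the other's write, which is what the deadline propagation relies on.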
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 394010a60821..506371c42745 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1043,10 +1043,9 @@ static int drm_sched_main(void *param)
trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
- drm_sched_fence_scheduled(s_fence);
+ drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- drm_sched_fence_set_parent(s_fence, fence);
/* Drop for original kref_init of the fence */
dma_fence_put(fence);
@@ -1142,9 +1141,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
- if (!rq)
- continue;
-
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list)
/*
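In drm_sched_main() the fence returned by run_job() is now handed straight to drm_sched_fence_scheduled(), replacing the separate drm_sched_fence_set_parent() call. The ordering matters: the parent pointer (which takes its own dma_fence_get() reference, see the helper above) is published before the scheduled fence signals, and that extra reference is also why the immediate dma_fence_put() of run_job()'s initial reference stays safe. Condensed from the hunk, error handling omitted:

fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);	/* records parent, then signals */

if (!IS_ERR_OR_NULL(fence))
	dma_fence_put(fence);	/* drop run_job()'s kref_init reference */

The drm_sched_fini() hunk is unrelated cleanup: &sched->sched_rq[i] is the address of an array element and can never be NULL, so the removed check was dead code.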
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 8cbf5aa66e19..5a80b228d18c 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -99,33 +99,61 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
.default_vcomh = 0x40,
.default_dclk_div = 1,
.default_dclk_frq = 5,
+ .default_width = 132,
+ .default_height = 64,
.page_mode_only = 1,
+ .page_height = 8,
},
[SSD1305_ID] = {
.default_vcomh = 0x34,
.default_dclk_div = 1,
.default_dclk_frq = 7,
+ .default_width = 132,
+ .default_height = 64,
+ .page_height = 8,
},
[SSD1306_ID] = {
.default_vcomh = 0x20,
.default_dclk_div = 1,
.default_dclk_frq = 8,
.need_chargepump = 1,
+ .default_width = 128,
+ .default_height = 64,
+ .page_height = 8,
},
[SSD1307_ID] = {
.default_vcomh = 0x20,
.default_dclk_div = 2,
.default_dclk_frq = 12,
.need_pwm = 1,
+ .default_width = 128,
+ .default_height = 39,
+ .page_height = 8,
},
[SSD1309_ID] = {
.default_vcomh = 0x34,
.default_dclk_div = 1,
.default_dclk_frq = 10,
+ .default_width = 128,
+ .default_height = 64,
+ .page_height = 8,
}
};
EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
+struct ssd130x_plane_state {
+ struct drm_shadow_plane_state base;
+ /* Intermediate buffer to convert pixels from XRGB8888 to HW format */
+ u8 *buffer;
+ /* Buffer to store pixels in HW format and written to the panel */
+ u8 *data_array;
+};
+
+static inline struct ssd130x_plane_state *to_ssd130x_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct ssd130x_plane_state, base.base);
+}
+
static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm)
{
return container_of(drm, struct ssd130x_device, drm);
@@ -419,26 +447,25 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
SSD130X_SET_ADDRESS_MODE_HORIZONTAL);
}
-static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
+static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ struct ssd130x_plane_state *ssd130x_state,
struct drm_rect *rect)
{
unsigned int x = rect->x1;
unsigned int y = rect->y1;
+ u8 *buf = ssd130x_state->buffer;
+ u8 *data_array = ssd130x_state->data_array;
unsigned int width = drm_rect_width(rect);
unsigned int height = drm_rect_height(rect);
unsigned int line_length = DIV_ROUND_UP(width, 8);
- unsigned int pages = DIV_ROUND_UP(height, 8);
+ unsigned int page_height = ssd130x->device_info->page_height;
+ unsigned int pages = DIV_ROUND_UP(height, page_height);
struct drm_device *drm = &ssd130x->drm;
u32 array_idx = 0;
int ret, i, j, k;
- u8 *data_array = NULL;
drm_WARN_ONCE(drm, y % 8 != 0, "y must be aligned to screen page\n");
- data_array = kcalloc(width, pages, GFP_KERNEL);
- if (!data_array)
- return -ENOMEM;
-
/*
* The screen is divided in pages, each having a height of 8
* pixels, and the width of the screen. When sending a byte of
@@ -472,11 +499,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
/* Set address range for horizontal addressing mode */
ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
if (ret < 0)
- goto out_free;
+ return ret;
ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
if (ret < 0)
- goto out_free;
+ return ret;
}
for (i = 0; i < pages; i++) {
@@ -506,11 +533,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
ssd130x->page_offset + i,
ssd130x->col_offset + x);
if (ret < 0)
- goto out_free;
+ return ret;
ret = ssd130x_write_data(ssd130x, data_array, width);
if (ret < 0)
- goto out_free;
+ return ret;
array_idx = 0;
}
@@ -520,14 +547,12 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
if (!ssd130x->page_address_mode)
ret = ssd130x_write_data(ssd130x, data_array, width * pages);
-out_free:
- kfree(data_array);
return ret;
}
-static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
+static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
+ struct ssd130x_plane_state *ssd130x_state)
{
- u8 *buf = NULL;
struct drm_rect fullscreen = {
.x1 = 0,
.x2 = ssd130x->width,
@@ -535,51 +560,80 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
.y2 = ssd130x->height,
};
- buf = kcalloc(DIV_ROUND_UP(ssd130x->width, 8), ssd130x->height,
- GFP_KERNEL);
- if (!buf)
- return;
-
- ssd130x_update_rect(ssd130x, buf, &fullscreen);
-
- kfree(buf);
+ ssd130x_update_rect(ssd130x, ssd130x_state, &fullscreen);
}
-static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *vmap,
+static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
+ const struct iosys_map *vmap,
struct drm_rect *rect)
{
+ struct drm_framebuffer *fb = state->fb;
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+ unsigned int page_height = ssd130x->device_info->page_height;
+ struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
+ u8 *buf = ssd130x_state->buffer;
struct iosys_map dst;
unsigned int dst_pitch;
int ret = 0;
- u8 *buf = NULL;
/* Align y to display page boundaries */
- rect->y1 = round_down(rect->y1, 8);
- rect->y2 = min_t(unsigned int, round_up(rect->y2, 8), ssd130x->height);
+ rect->y1 = round_down(rect->y1, page_height);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, page_height), ssd130x->height);
dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
- buf = kcalloc(dst_pitch, drm_rect_height(rect), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
- goto out_free;
+ return ret;
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
- ssd130x_update_rect(ssd130x, buf, rect);
-
-out_free:
- kfree(buf);
+ ssd130x_update_rect(ssd130x, ssd130x_state, rect);
return ret;
}
+static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = plane->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+ unsigned int page_height = ssd130x->device_info->page_height;
+ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
+ const struct drm_format_info *fi;
+ unsigned int pitch;
+ int ret;
+
+ ret = drm_plane_helper_atomic_check(plane, state);
+ if (ret)
+ return ret;
+
+ fi = drm_format_info(DRM_FORMAT_R1);
+ if (!fi)
+ return -EINVAL;
+
+ pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+
+ ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
+ if (!ssd130x_state->buffer)
+ return -ENOMEM;
+
+ ssd130x_state->data_array = kcalloc(ssd130x->width, pages, GFP_KERNEL);
+ if (!ssd130x_state->data_array) {
+ kfree(ssd130x_state->buffer);
+ /* Set to prevent a double free in .atomic_destroy_state() */
+ ssd130x_state->buffer = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
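With the buffers attached to the plane state, ssd130x_update_rect() and ssd130x_fb_blit_rect() no longer kcalloc()/kfree() on every screen update; allocation moves into the new .atomic_check, where an -ENOMEM can still reject the commit cleanly. The intermediate buffer is sized from the DRM_FORMAT_R1 (1 bit per pixel) format metadata rather than an open-coded width/8. The sizing idiom, as a fragment with illustrative variables:

const struct drm_format_info *fi = drm_format_info(DRM_FORMAT_R1);
unsigned int pitch;
u8 *buf;

if (!fi)
	return -EINVAL;

pitch = drm_format_info_min_pitch(fi, 0, width);	/* bytes per line */
buf = kcalloc(pitch, height, GFP_KERNEL);
if (!buf)
	return -ENOMEM;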
@@ -602,7 +656,7 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
- ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+ ssd130x_fb_blit_rect(plane_state, &shadow_plane_state->data[0], &dst_clip);
}
drm_dev_exit(idx);
@@ -613,19 +667,72 @@ static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
{
struct drm_device *drm = plane->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane->state);
int idx;
if (!drm_dev_enter(drm, &idx))
return;
- ssd130x_clear_screen(ssd130x);
+ ssd130x_clear_screen(ssd130x, ssd130x_state);
drm_dev_exit(idx);
}
+/* Called during init to allocate the plane's atomic state. */
+static void ssd130x_primary_plane_reset(struct drm_plane *plane)
+{
+ struct ssd130x_plane_state *ssd130x_state;
+
+ WARN_ON(plane->state);
+
+ ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
+ if (!ssd130x_state)
+ return;
+
+ __drm_gem_reset_shadow_plane(plane, &ssd130x_state->base);
+}
+
+static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_shadow_plane_state *new_shadow_plane_state;
+ struct ssd130x_plane_state *old_ssd130x_state;
+ struct ssd130x_plane_state *ssd130x_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ old_ssd130x_state = to_ssd130x_plane_state(plane->state);
+ ssd130x_state = kmemdup(old_ssd130x_state, sizeof(*ssd130x_state), GFP_KERNEL);
+ if (!ssd130x_state)
+ return NULL;
+
+ /* The buffers are not duplicated and are allocated in .atomic_check */
+ ssd130x_state->buffer = NULL;
+ ssd130x_state->data_array = NULL;
+
+ new_shadow_plane_state = &ssd130x_state->base;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+
+ return &new_shadow_plane_state->base;
+}
+
+static void ssd130x_primary_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
+
+ kfree(ssd130x_state->data_array);
+ kfree(ssd130x_state->buffer);
+
+ __drm_gem_destroy_shadow_plane_state(&ssd130x_state->base);
+
+ kfree(ssd130x_state);
+}
+
static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_check = ssd130x_primary_plane_helper_atomic_check,
.atomic_update = ssd130x_primary_plane_helper_atomic_update,
.atomic_disable = ssd130x_primary_plane_helper_atomic_disable,
};
@@ -633,8 +740,10 @@ static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs =
static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
+ .reset = ssd130x_primary_plane_reset,
+ .atomic_duplicate_state = ssd130x_primary_plane_duplicate_state,
+ .atomic_destroy_state = ssd130x_primary_plane_destroy_state,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc,
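Because the plane state now owns allocations, the generic DRM_GEM_SHADOW_PLANE_FUNCS can no longer manage its lifecycle; the driver supplies its own .reset, .atomic_duplicate_state and .atomic_destroy_state. Two details worth noting above: duplicate_state deliberately NULLs buffer and data_array in the copy (they belong to the old state and are reallocated by .atomic_check, so keeping the stale pointers would risk a double free), and destroy_state frees them before tearing down the shadow-plane base. A condensed sketch of the duplicate step for the hypothetical "foo" state from earlier:

static struct drm_plane_state *foo_duplicate_state(struct drm_plane *plane)
{
	struct foo_plane_state *old_state = to_foo_plane_state(plane->state);
	struct foo_plane_state *new_state;

	new_state = kmemdup(old_state, sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	new_state->bounce = NULL;	/* reallocated later in .atomic_check */
	__drm_gem_duplicate_shadow_plane_state(plane, &new_state->base);

	return &new_state->base.base;
}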
@@ -684,14 +793,18 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
return;
ret = ssd130x_init(ssd130x);
- if (ret) {
- ssd130x_power_off(ssd130x);
- return;
- }
+ if (ret)
+ goto power_off;
ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
backlight_enable(ssd130x->bl_dev);
+
+ return;
+
+power_off:
+ ssd130x_power_off(ssd130x);
+ return;
}
static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
@@ -798,10 +911,10 @@ static void ssd130x_parse_properties(struct ssd130x_device *ssd130x)
struct device *dev = ssd130x->dev;
if (device_property_read_u32(dev, "solomon,width", &ssd130x->width))
- ssd130x->width = 96;
+ ssd130x->width = ssd130x->device_info->default_width;
if (device_property_read_u32(dev, "solomon,height", &ssd130x->height))
- ssd130x->height = 16;
+ ssd130x->height = ssd130x->device_info->default_height;
if (device_property_read_u32(dev, "solomon,page-offset", &ssd130x->page_offset))
ssd130x->page_offset = 1;
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index db03ee5db392..87968b3e7fb8 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -37,6 +37,9 @@ struct ssd130x_deviceinfo {
u32 default_vcomh;
u32 default_dclk_div;
u32 default_dclk_frq;
+ u32 default_width;
+ u32 default_height;
+ u32 page_height;
int need_pwm;
int need_chargepump;
bool page_mode_only;
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index b96fc6837b0d..48183bbd0590 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -9,10 +9,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index be60c0d546a3..0aa39156f2fa 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -5,10 +5,11 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index ab0e5cce7adb..d7b143a75601 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -5,10 +5,8 @@
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_graph.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 142a8e1b4436..33487a1fed8f 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -258,10 +258,9 @@ static int sti_compositor_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sti_compositor_ops);
}
-static int sti_compositor_remove(struct platform_device *pdev)
+static void sti_compositor_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_compositor_ops);
- return 0;
}
struct platform_driver sti_compositor_driver = {
@@ -270,7 +269,7 @@ struct platform_driver sti_compositor_driver = {
.of_match_table = compositor_of_match,
},
.probe = sti_compositor_probe,
- .remove = sti_compositor_remove,
+ .remove_new = sti_compositor_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 1b87b5899f9e..2390c1bb6596 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -8,7 +8,9 @@
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -246,11 +248,9 @@ static int sti_platform_probe(struct platform_device *pdev)
return component_master_add_with_match(dev, &sti_ops, match);
}
-static int sti_platform_remove(struct platform_device *pdev)
+static void sti_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &sti_ops);
-
- return 0;
}
static const struct of_device_id sti_dt_ids[] = {
@@ -261,7 +261,7 @@ MODULE_DEVICE_TABLE(of, sti_dt_ids);
static struct platform_driver sti_platform_driver = {
.probe = sti_platform_probe,
- .remove = sti_platform_remove,
+ .remove_new = sti_platform_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = sti_dt_ids,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 0c6679e361c8..fd1df4ce3852 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -567,10 +567,9 @@ static int sti_dvo_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sti_dvo_ops);
}
-static int sti_dvo_remove(struct platform_device *pdev)
+static void sti_dvo_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_dvo_ops);
- return 0;
}
static const struct of_device_id dvo_of_match[] = {
@@ -586,7 +585,7 @@ struct platform_driver sti_dvo_driver = {
.of_match_table = dvo_of_match,
},
.probe = sti_dvo_probe,
- .remove = sti_dvo_remove,
+ .remove_new = sti_dvo_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 15097ac67931..6ee35612a14e 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -792,10 +792,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sti_hda_ops);
}
-static int sti_hda_remove(struct platform_device *pdev)
+static void sti_hda_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_hda_ops);
- return 0;
}
static const struct of_device_id hda_of_match[] = {
@@ -812,7 +811,7 @@ struct platform_driver sti_hda_driver = {
.of_match_table = hda_of_match,
},
.probe = sti_hda_probe,
- .remove = sti_hda_remove,
+ .remove_new = sti_hda_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index dc1562f14ceb..500936d5743c 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1472,7 +1472,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
return ret;
}
-static int sti_hdmi_remove(struct platform_device *pdev)
+static void sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
@@ -1480,8 +1480,6 @@ static int sti_hdmi_remove(struct platform_device *pdev)
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
-
- return 0;
}
struct platform_driver sti_hdmi_driver = {
@@ -1491,7 +1489,7 @@ struct platform_driver sti_hdmi_driver = {
.of_match_table = hdmi_of_match,
},
.probe = sti_hdmi_probe,
- .remove = sti_hdmi_remove,
+ .remove_new = sti_hdmi_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 02b77279f6e4..0fb48ac044d8 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1400,10 +1400,9 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sti_hqvdp_ops);
}
-static int sti_hqvdp_remove(struct platform_device *pdev)
+static void sti_hqvdp_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_hqvdp_ops);
- return 0;
}
static const struct of_device_id hqvdp_of_match[] = {
@@ -1419,7 +1418,7 @@ struct platform_driver sti_hqvdp_driver = {
.of_match_table = hqvdp_of_match,
},
.probe = sti_hqvdp_probe,
- .remove = sti_hqvdp_remove,
+ .remove_new = sti_hqvdp_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 2499715a69b7..64615638b79a 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -872,10 +872,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
return component_add(dev, &sti_tvout_ops);
}
-static int sti_tvout_remove(struct platform_device *pdev)
+static void sti_tvout_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sti_tvout_ops);
- return 0;
}
static const struct of_device_id tvout_of_match[] = {
@@ -891,7 +890,7 @@ struct platform_driver sti_tvout_driver = {
.of_match_table = tvout_of_match,
},
.probe = sti_tvout_probe,
- .remove = sti_tvout_remove,
+ .remove_new = sti_tvout_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index cb4404b3ce62..c68c831136c9 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -10,8 +10,9 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_aperture.h>
@@ -213,7 +214,7 @@ err_put:
return ret;
}
-static int stm_drm_platform_remove(struct platform_device *pdev)
+static void stm_drm_platform_remove(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
@@ -222,8 +223,6 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drv_unload(ddev);
drm_dev_put(ddev);
-
- return 0;
}
static const struct of_device_id drv_dt_ids[] = {
@@ -234,7 +233,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
static struct platform_driver stm_drm_platform_driver = {
.probe = stm_drm_platform_probe,
- .remove = stm_drm_platform_remove,
+ .remove_new = stm_drm_platform_remove,
.driver = {
.name = "stm32-display",
.of_match_table = drv_dt_ids,
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 1750b6a25e87..d5f8c923d7bc 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -535,15 +535,13 @@ err_clk_get:
return ret;
}
-static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
+static void dw_mipi_dsi_stm_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_stm *dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(dsi->dsi);
clk_disable_unprepare(dsi->pllref_clk);
regulator_disable(dsi->vdd_supply);
-
- return 0;
}
static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
@@ -588,7 +586,7 @@ static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
static struct platform_driver dw_mipi_dsi_stm_driver = {
.probe = dw_mipi_dsi_stm_probe,
- .remove = dw_mipi_dsi_stm_remove,
+ .remove_new = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
.name = "stm32-display-dsi",
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index b8be4c1db423..5576fdae4962 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index b11dbd50d73e..335fd0edb904 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -965,11 +965,9 @@ static int sun4i_backend_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun4i_backend_ops);
}
-static int sun4i_backend_remove(struct platform_device *pdev)
+static void sun4i_backend_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_backend_ops);
-
- return 0;
}
static const struct sun4i_backend_quirks sun4i_backend_quirks = {
@@ -1028,7 +1026,7 @@ MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
static struct platform_driver sun4i_backend_platform_driver = {
.probe = sun4i_backend_probe,
- .remove = sun4i_backend_remove,
+ .remove_new = sun4i_backend_remove,
.driver = {
.name = "sun4i-backend",
.of_match_table = sun4i_backend_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index daa7faf72a4b..6a8dfc022d3c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -408,11 +408,9 @@ static int sun4i_drv_probe(struct platform_device *pdev)
return 0;
}
-static int sun4i_drv_remove(struct platform_device *pdev)
+static void sun4i_drv_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &sun4i_drv_master_ops);
-
- return 0;
}
static const struct of_device_id sun4i_drv_of_table[] = {
@@ -438,7 +436,7 @@ MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
static struct platform_driver sun4i_drv_platform_driver = {
.probe = sun4i_drv_probe,
- .remove = sun4i_drv_remove,
+ .remove_new = sun4i_drv_remove,
.driver = {
.name = "sun4i-drm",
.of_match_table = sun4i_drv_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 799ab7460ae5..280d444dbb66 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -7,7 +7,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -634,11 +634,9 @@ static int sun4i_frontend_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun4i_frontend_ops);
}
-static int sun4i_frontend_remove(struct platform_device *pdev)
+static void sun4i_frontend_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_frontend_ops);
-
- return 0;
}
static int sun4i_frontend_runtime_resume(struct device *dev)
@@ -719,7 +717,7 @@ MODULE_DEVICE_TABLE(of, sun4i_frontend_of_table);
static struct platform_driver sun4i_frontend_driver = {
.probe = sun4i_frontend_probe,
- .remove = sun4i_frontend_remove,
+ .remove_new = sun4i_frontend_remove,
.driver = {
.name = "sun4i-frontend",
.of_match_table = sun4i_frontend_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 2e7b76e50c2b..61c24088772c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -8,6 +8,7 @@
#define _SUN4I_FRONTEND_H_
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#define SUN4I_FRONTEND_EN_REG 0x000
#define SUN4I_FRONTEND_EN_EN BIT(0)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index c0df5e892fa7..152375f3de2e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -693,11 +693,9 @@ static int sun4i_hdmi_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun4i_hdmi_ops);
}
-static int sun4i_hdmi_remove(struct platform_device *pdev)
+static void sun4i_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_hdmi_ops);
-
- return 0;
}
static const struct of_device_id sun4i_hdmi_of_table[] = {
@@ -710,7 +708,7 @@ MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
static struct platform_driver sun4i_hdmi_driver = {
.probe = sun4i_hdmi_probe,
- .remove = sun4i_hdmi_remove,
+ .remove_new = sun4i_hdmi_remove,
.driver = {
.name = "sun4i-hdmi",
.of_match_table = sun4i_hdmi_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
index c7d7e9fff91c..d1a65a921f5a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -304,7 +304,7 @@ int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DDC;
adap->algo = &sun4i_hdmi_i2c_algorithm;
- strlcpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
+ strscpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
ret = i2c_add_adapter(adap);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 6a52fb12cbfb..a1a2c845ade0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -10,9 +10,9 @@
#include <linux/ioport.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -1331,11 +1331,9 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun4i_tcon_ops);
}
-static int sun4i_tcon_remove(struct platform_device *pdev)
+static void sun4i_tcon_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_tcon_ops);
-
- return 0;
}
/* platform specific TCON muxing callbacks */
@@ -1570,7 +1568,7 @@ EXPORT_SYMBOL(sun4i_tcon_of_table);
static struct platform_driver sun4i_tcon_platform_driver = {
.probe = sun4i_tcon_probe,
- .remove = sun4i_tcon_remove,
+ .remove_new = sun4i_tcon_remove,
.driver = {
.name = "sun4i-tcon",
.of_match_table = sun4i_tcon_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 9625a00a48ba..ec65d9d59de7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -546,11 +546,9 @@ static int sun4i_tv_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun4i_tv_ops);
}
-static int sun4i_tv_remove(struct platform_device *pdev)
+static void sun4i_tv_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun4i_tv_ops);
-
- return 0;
}
static const struct of_device_id sun4i_tv_of_table[] = {
@@ -561,7 +559,7 @@ MODULE_DEVICE_TABLE(of, sun4i_tv_of_table);
static struct platform_driver sun4i_tv_platform_driver = {
.probe = sun4i_tv_probe,
- .remove = sun4i_tv_remove,
+ .remove_new = sun4i_tv_remove,
.driver = {
.name = "sun4i-tve",
.of_match_table = sun4i_tv_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index 4fbe9a6b5182..0d342f43fa93 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -95,11 +95,9 @@ static int sun6i_drc_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun6i_drc_ops);
}
-static int sun6i_drc_remove(struct platform_device *pdev)
+static void sun6i_drc_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun6i_drc_ops);
-
- return 0;
}
static const struct of_device_id sun6i_drc_of_table[] = {
@@ -114,7 +112,7 @@ MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
static struct platform_driver sun6i_drc_platform_driver = {
.probe = sun6i_drc_probe,
- .remove = sun6i_drc_remove,
+ .remove_new = sun6i_drc_remove,
.driver = {
.name = "sun6i-drc",
.of_match_table = sun6i_drc_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 760ff05eabf4..4abf4f102007 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1200,7 +1200,7 @@ err_attach_clk:
return ret;
}
-static int sun6i_dsi_remove(struct platform_device *pdev)
+static void sun6i_dsi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
@@ -1211,8 +1211,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
clk_rate_exclusive_put(dsi->mod_clk);
regmap_mmio_detach_clk(dsi->regs);
-
- return 0;
}
static const struct sun6i_dsi_variant sun6i_a31_mipi_dsi_variant = {
@@ -1246,7 +1244,7 @@ MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
static struct platform_driver sun6i_dsi_platform_driver = {
.probe = sun6i_dsi_probe,
- .remove = sun6i_dsi_remove,
+ .remove_new = sun6i_dsi_remove,
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 7cab4213a680..4727dfaa8fb9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -5,7 +5,7 @@
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -235,11 +235,9 @@ static int sun8i_dw_hdmi_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun8i_dw_hdmi_ops);
}
-static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
+static void sun8i_dw_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun8i_dw_hdmi_ops);
-
- return 0;
}
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
@@ -266,7 +264,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.probe = sun8i_dw_hdmi_probe,
- .remove = sun8i_dw_hdmi_remove,
+ .remove_new = sun8i_dw_hdmi_remove,
.driver = {
.name = "sun8i-dw-hdmi",
.of_match_table = sun8i_dw_hdmi_dt_ids,
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index ca53b5e9fffc..4fa69c463dc4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -4,8 +4,9 @@
*/
#include <linux/delay.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include "sun8i_dw_hdmi.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 11d5244a5aa5..01382860aaee 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -10,8 +10,10 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
@@ -555,11 +557,9 @@ static int sun8i_mixer_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun8i_mixer_ops);
}
-static int sun8i_mixer_remove(struct platform_device *pdev)
+static void sun8i_mixer_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun8i_mixer_ops);
-
- return 0;
}
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
@@ -711,7 +711,7 @@ MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
static struct platform_driver sun8i_mixer_platform_driver = {
.probe = sun8i_mixer_probe,
- .remove = sun8i_mixer_remove,
+ .remove_new = sun8i_mixer_remove,
.driver = {
.name = "sun8i-mixer",
.of_match_table = sun8i_mixer_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index da97682b6835..6f076cf4b403 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -7,7 +7,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -261,11 +261,9 @@ static int sun8i_tcon_top_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &sun8i_tcon_top_ops);
}
-static int sun8i_tcon_top_remove(struct platform_device *pdev)
+static void sun8i_tcon_top_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &sun8i_tcon_top_ops);
-
- return 0;
}
static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
@@ -302,7 +300,7 @@ EXPORT_SYMBOL(sun8i_tcon_top_of_table);
static struct platform_driver sun8i_tcon_top_platform_driver = {
.probe = sun8i_tcon_top_probe,
- .remove = sun8i_tcon_top_remove,
+ .remove_new = sun8i_tcon_top_remove,
.driver = {
.name = "sun8i-tcon-top",
.of_match_table = sun8i_tcon_top_of_table,
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 498313778175..84e7e6bc3a0c 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -12,7 +12,7 @@ config DRM_TEGRA
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
- select FB_SYS_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
select TEGRA_HOST1X
select INTERCONNECT
select IOMMU_IOVA
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 6e78416e64b0..13b182ab905f 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -11,7 +11,8 @@
#include <linux/iommu.h>
#include <linux/interconnect.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 4d2677dcd831..ef02d530f78d 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -447,7 +447,6 @@ static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
static int tegra_dpaux_probe(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux;
- struct resource *regs;
u32 value;
int err;
@@ -461,14 +460,13 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dpaux->list);
dpaux->dev = &pdev->dev;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dpaux->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dpaux->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dpaux->regs))
return PTR_ERR(dpaux->regs);
dpaux->irq = platform_get_irq(pdev, 0);
if (dpaux->irq < 0)
- return -ENXIO;
+ return dpaux->irq;
if (!pdev->dev.pm_domain) {
dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 35ff303c6674..ff36171c8fb7 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -887,8 +887,6 @@ static const struct drm_driver tegra_drm_driver = {
.debugfs_init = tegra_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index e74d9be981c7..db6eaac3d30e 100644
--- a/drivers/gpu/drm/tegra/fbdev.c
+++ b/drivers/gpu/drm/tegra/fbdev.c
@@ -59,9 +59,9 @@ static void tegra_fbdev_fb_destroy(struct fb_info *info)
static const struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
- __FB_DEFAULT_SYS_OPS_RDWR,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_SYS_OPS_DRAW,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_mmap = tegra_fb_mmap,
.fb_destroy = tegra_fbdev_fb_destroy,
};
@@ -132,7 +132,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- info->screen_base = (void __iomem *)bo->vaddr + offset;
+ info->flags |= FBINFO_VIRTFB;
+ info->screen_buffer = bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
@@ -225,10 +226,6 @@ void tegra_fbdev_setup(struct drm_device *dev)
if (ret)
goto err_drm_client_init;
- ret = tegra_fbdev_client_hotplug(&helper->client);
- if (ret)
- drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
-
drm_client_register(&helper->client);
return;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index dea38892d6e6..a4023163493d 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -694,8 +694,6 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
struct drm_gem_object *gem = buf->priv;
int err;
- dma_resv_assert_held(buf->resv);
-
err = drm_gem_mmap_obj(gem, gem->size, vma);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 50f77fddda54..a160d01f26e1 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -7,7 +7,8 @@
#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -70,22 +71,15 @@ static int gr2d_init(struct host1x_client *client)
goto free;
}
- pm_runtime_enable(client->dev);
- pm_runtime_use_autosuspend(client->dev);
- pm_runtime_set_autosuspend_delay(client->dev, 200);
-
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
- goto disable_rpm;
+ goto detach_iommu;
}
return 0;
-disable_rpm:
- pm_runtime_dont_use_autosuspend(client->dev);
- pm_runtime_force_suspend(client->dev);
-
+detach_iommu:
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
@@ -299,6 +293,7 @@ static void gr2d_remove(struct platform_device *pdev)
{
struct gr2d *gr2d = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr2d->client.base);
}
@@ -372,6 +367,10 @@ static int __maybe_unused gr2d_runtime_resume(struct device *dev)
goto disable_clk;
}
+ pm_runtime_enable(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+
return 0;
disable_clk:
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index c026c2c916c1..00c8564520e7 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -9,7 +9,7 @@
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
@@ -80,22 +80,15 @@ static int gr3d_init(struct host1x_client *client)
goto free;
}
- pm_runtime_enable(client->dev);
- pm_runtime_use_autosuspend(client->dev);
- pm_runtime_set_autosuspend_delay(client->dev, 200);
-
err = tegra_drm_register_client(dev->dev_private, drm);
if (err < 0) {
dev_err(client->dev, "failed to register client: %d\n", err);
- goto disable_rpm;
+ goto detach_iommu;
}
return 0;
-disable_rpm:
- pm_runtime_dont_use_autosuspend(client->dev);
- pm_runtime_force_suspend(client->dev);
-
+detach_iommu:
host1x_client_iommu_detach(client);
free:
host1x_syncpt_put(client->syncpts[0]);
@@ -554,6 +547,7 @@ static void gr3d_remove(struct platform_device *pdev)
{
struct gr3d *gr3d = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr3d->client.base);
}
@@ -607,6 +601,10 @@ static int __maybe_unused gr3d_runtime_resume(struct device *dev)
goto disable_clk;
}
+ pm_runtime_enable(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+
return 0;
disable_clk:
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6eac54ae1205..80c760986d9e 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -10,7 +10,8 @@
#include <linux/hdmi.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -19,6 +20,7 @@
#include <soc/tegra/common.h>
#include <sound/hdmi-codec.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
@@ -1544,26 +1546,47 @@ static int tegra_hdmi_init(struct host1x_client *client)
{
struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
struct drm_device *drm = dev_get_drvdata(client->host);
+ struct drm_connector *connector;
int err;
hdmi->output.dev = client->dev;
- drm_connector_init_with_ddc(drm, &hdmi->output.connector,
- &tegra_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->output.ddc);
- drm_connector_helper_add(&hdmi->output.connector,
- &tegra_hdmi_connector_helper_funcs);
- hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
-
drm_simple_encoder_init(drm, &hdmi->output.encoder,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
- drm_connector_attach_encoder(&hdmi->output.connector,
- &hdmi->output.encoder);
- drm_connector_register(&hdmi->output.connector);
+ if (hdmi->output.bridge) {
+ err = drm_bridge_attach(&hdmi->output.encoder, hdmi->output.bridge,
+ NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (err) {
+ dev_err(client->dev, "failed to attach bridge: %d\n",
+ err);
+ return err;
+ }
+
+ connector = drm_bridge_connector_init(drm, &hdmi->output.encoder);
+ if (IS_ERR(connector)) {
+ dev_err(client->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ return PTR_ERR(connector);
+ }
+
+ drm_connector_attach_encoder(connector, &hdmi->output.encoder);
+ } else {
+ drm_connector_init_with_ddc(drm, &hdmi->output.connector,
+ &tegra_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->output.ddc);
+ drm_connector_helper_add(&hdmi->output.connector,
+ &tegra_hdmi_connector_helper_funcs);
+ hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_connector_attach_encoder(&hdmi->output.connector,
+ &hdmi->output.encoder);
+ drm_connector_register(&hdmi->output.connector);
+ }
err = tegra_output_init(drm, &hdmi->output);
if (err < 0) {
@@ -1769,7 +1792,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
struct tegra_hdmi *hdmi;
- struct resource *regs;
int err;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -1831,8 +1853,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0)
return err;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 916857361a91..1af5f8318d91 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -9,8 +9,8 @@
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
index ae78a81e5eef..4860790666af 100644
--- a/drivers/gpu/drm/tegra/nvdec.c
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -11,8 +11,6 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -177,13 +175,9 @@ static int nvdec_init(struct host1x_client *client)
goto free_channel;
}
- pm_runtime_enable(client->dev);
- pm_runtime_use_autosuspend(client->dev);
- pm_runtime_set_autosuspend_delay(client->dev, 500);
-
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
- goto disable_rpm;
+ goto free_syncpt;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
@@ -193,10 +187,7 @@ static int nvdec_init(struct host1x_client *client)
return 0;
-disable_rpm:
- pm_runtime_dont_use_autosuspend(client->dev);
- pm_runtime_force_suspend(client->dev);
-
+free_syncpt:
host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(nvdec->channel);
@@ -276,6 +267,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
return err;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
+ if (IS_ERR(virt))
+ return PTR_ERR(virt);
}
nvdec->falcon.firmware.virt = virt;
@@ -539,6 +532,10 @@ static int nvdec_probe(struct platform_device *pdev)
goto exit_falcon;
}
+ pm_runtime_enable(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+
return 0;
exit_falcon:
@@ -551,8 +548,8 @@ static void nvdec_remove(struct platform_device *pdev)
{
struct nvdec *nvdec = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&nvdec->client.base);
-
falcon_exit(&nvdec->falcon);
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index fbb63d755496..61b437a84806 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -8,7 +8,7 @@
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -586,6 +586,7 @@ static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
}
static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .determine_rate = clk_hw_determine_rate_no_reparent,
.set_parent = tegra_clk_sor_pad_set_parent,
.get_parent = tegra_clk_sor_pad_get_parent,
};
@@ -3707,7 +3708,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
{
struct device_node *np;
struct tegra_sor *sor;
- struct resource *regs;
int err;
sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
@@ -3780,8 +3780,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sor->regs = devm_ioremap_resource(&pdev->dev, regs);
+ sor->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sor->regs)) {
err = PTR_ERR(sor->regs);
goto remove;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index da7a038dca20..73c356f1c901 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -10,8 +10,6 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -143,13 +141,9 @@ static int vic_init(struct host1x_client *client)
goto free_channel;
}
- pm_runtime_enable(client->dev);
- pm_runtime_use_autosuspend(client->dev);
- pm_runtime_set_autosuspend_delay(client->dev, 500);
-
err = tegra_drm_register_client(tegra, drm);
if (err < 0)
- goto disable_rpm;
+ goto free_syncpt;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
@@ -159,10 +153,7 @@ static int vic_init(struct host1x_client *client)
return 0;
-disable_rpm:
- pm_runtime_dont_use_autosuspend(client->dev);
- pm_runtime_force_suspend(client->dev);
-
+free_syncpt:
host1x_syncpt_put(client->syncpts[0]);
free_channel:
host1x_channel_put(vic->channel);
@@ -529,6 +520,10 @@ static int vic_probe(struct platform_device *pdev)
goto exit_falcon;
}
+ pm_runtime_enable(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+
return 0;
exit_falcon:
@@ -541,8 +536,8 @@ static void vic_remove(struct platform_device *pdev)
{
struct vic *vic = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&vic->client.base);
-
falcon_exit(&vic->falcon);
}
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index bca726a8f483..ba7baa622675 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o
+ drm_rect_test.o \
+ drm_exec_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index 416a279b6dae..7516f6cb36e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -82,13 +82,6 @@ static int drm_client_modeset_test_init(struct kunit *test)
return 0;
}
-static void drm_client_modeset_test_exit(struct kunit *test)
-{
- struct drm_client_modeset_test_priv *priv = test->priv;
-
- drm_kunit_helper_free_device(test, priv->dev);
-}
-
static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
{
struct drm_client_modeset_test_priv *priv = test->priv;
@@ -188,7 +181,6 @@ static struct kunit_case drm_test_pick_cmdline_tests[] = {
static struct kunit_suite drm_test_pick_cmdline_test_suite = {
.name = "drm_test_pick_cmdline",
.init = drm_client_modeset_test_init,
- .exit = drm_client_modeset_test_exit,
.test_cases = drm_test_pick_cmdline_tests
};
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
new file mode 100644
index 000000000000..563949d777dd
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#define pr_fmt(fmt) "drm_exec: " fmt
+
+#include <kunit/test.h>
+
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "../lib/drm_random.h"
+
+struct drm_exec_priv {
+ struct device *dev;
+ struct drm_device *drm;
+};
+
+static int drm_exec_test_init(struct kunit *test)
+{
+ struct drm_exec_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ test->priv = priv;
+
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev, sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ return 0;
+}
+
+static void sanitycheck(struct kunit *test)
+{
+ struct drm_exec exec;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_fini(&exec);
+ KUNIT_SUCCEED(test);
+}
+
+static void test_lock(struct kunit *test)
+{
+ struct drm_exec_priv *priv = test->priv;
+ struct drm_gem_object gobj = { };
+ struct drm_exec exec;
+ int ret;
+
+ drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ ret = drm_exec_lock_obj(&exec, &gobj);
+ drm_exec_retry_on_contention(&exec);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ break;
+ }
+ drm_exec_fini(&exec);
+}
+
+static void test_lock_unlock(struct kunit *test)
+{
+ struct drm_exec_priv *priv = test->priv;
+ struct drm_gem_object gobj = { };
+ struct drm_exec exec;
+ int ret;
+
+ drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ ret = drm_exec_lock_obj(&exec, &gobj);
+ drm_exec_retry_on_contention(&exec);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ break;
+
+ drm_exec_unlock_obj(&exec, &gobj);
+ ret = drm_exec_lock_obj(&exec, &gobj);
+ drm_exec_retry_on_contention(&exec);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ break;
+ }
+ drm_exec_fini(&exec);
+}
+
+static void test_duplicates(struct kunit *test)
+{
+ struct drm_exec_priv *priv = test->priv;
+ struct drm_gem_object gobj = { };
+ struct drm_exec exec;
+ int ret;
+
+ drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_until_all_locked(&exec) {
+ ret = drm_exec_lock_obj(&exec, &gobj);
+ drm_exec_retry_on_contention(&exec);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ break;
+
+ ret = drm_exec_lock_obj(&exec, &gobj);
+ drm_exec_retry_on_contention(&exec);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ break;
+ }
+ drm_exec_unlock_obj(&exec, &gobj);
+ drm_exec_fini(&exec);
+}
+
+static void test_prepare(struct kunit *test)
+{
+ struct drm_exec_priv *priv = test->priv;
+ struct drm_gem_object gobj = { };
+ struct drm_exec exec;
+ int ret;
+
+ drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec) {
+ ret = drm_exec_prepare_obj(&exec, &gobj, 1);
+ drm_exec_retry_on_contention(&exec);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ break;
+ }
+ drm_exec_fini(&exec);
+
+ drm_gem_private_object_fini(&gobj);
+}
+
+static void test_prepare_array(struct kunit *test)
+{
+ struct drm_exec_priv *priv = test->priv;
+ struct drm_gem_object gobj1 = { };
+ struct drm_gem_object gobj2 = { };
+ struct drm_gem_object *array[] = { &gobj1, &gobj2 };
+ struct drm_exec exec;
+ int ret;
+
+ drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
+ drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec)
+ ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
+ 1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ drm_exec_fini(&exec);
+
+ drm_gem_private_object_fini(&gobj1);
+ drm_gem_private_object_fini(&gobj2);
+}
+
+static void test_multiple_loops(struct kunit *test)
+{
+ struct drm_exec exec;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec)
+ {
+ break;
+ }
+ drm_exec_fini(&exec);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_until_all_locked(&exec)
+ {
+ break;
+ }
+ drm_exec_fini(&exec);
+ KUNIT_SUCCEED(test);
+}
+
+static struct kunit_case drm_exec_tests[] = {
+ KUNIT_CASE(sanitycheck),
+ KUNIT_CASE(test_lock),
+ KUNIT_CASE(test_lock_unlock),
+ KUNIT_CASE(test_duplicates),
+ KUNIT_CASE(test_prepare),
+ KUNIT_CASE(test_prepare_array),
+ KUNIT_CASE(test_multiple_loops),
+ {}
+};
+
+static struct kunit_suite drm_exec_test_suite = {
+ .name = "drm_exec",
+ .init = drm_exec_test_init,
+ .test_cases = drm_exec_tests,
+};
+
+kunit_test_suite(drm_exec_test_suite);
+
+MODULE_AUTHOR("AMD");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index df235b7fdaa5..f759d9f3b76e 100644
--- a/drivers/gpu/drm/tests/drm_framebuffer_test.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -178,13 +178,13 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
.handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
}
},
-{ .buffer_created = 1, .name = "YVU420 Normal sizes",
+{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.pitches = { 600, 300, 300 },
}
},
-{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
+{ .buffer_created = 1, .name = "YVU420 Normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
}
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 4df47071dc88..3d624ff2f651 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
@@ -26,6 +27,28 @@ static struct platform_driver fake_platform_driver = {
},
};
+static void kunit_action_platform_driver_unregister(void *ptr)
+{
+ struct platform_driver *drv = ptr;
+
+ platform_driver_unregister(drv);
+
+}
+
+static void kunit_action_platform_device_put(void *ptr)
+{
+ struct platform_device *pdev = ptr;
+
+ platform_device_put(pdev);
+}
+
+static void kunit_action_platform_device_del(void *ptr)
+{
+ struct platform_device *pdev = ptr;
+
+ platform_device_del(pdev);
+}
+
/**
* drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
* @test: The test context object
@@ -35,8 +58,8 @@ static struct platform_driver fake_platform_driver = {
* able to leverage the usual infrastructure and most notably the
* device-managed resources just like a "real" device.
*
- * Callers need to make sure drm_kunit_helper_free_device() on the
- * device when done.
+ * Resources will be cleaned up automatically, but the removal can be
+ * forced using drm_kunit_helper_free_device().
*
* Returns:
* A pointer to the new device, or an ERR_PTR() otherwise.
@@ -49,12 +72,27 @@ struct device *drm_kunit_helper_alloc_device(struct kunit *test)
ret = platform_driver_register(&fake_platform_driver);
KUNIT_ASSERT_EQ(test, ret, 0);
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_platform_driver_unregister,
+ &fake_platform_driver);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_platform_device_put,
+ pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ret = platform_device_add(pdev);
KUNIT_ASSERT_EQ(test, ret, 0);
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_platform_device_del,
+ pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
return &pdev->dev;
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
@@ -70,8 +108,17 @@ void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
- platform_device_unregister(pdev);
- platform_driver_unregister(&fake_platform_driver);
+ kunit_release_action(test,
+ kunit_action_platform_device_del,
+ pdev);
+
+ kunit_release_action(test,
+ kunit_action_platform_device_put,
+ pdev);
+
+ kunit_release_action(test,
+ kunit_action_platform_driver_unregister,
+ &fake_platform_driver);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
@@ -100,5 +147,91 @@ __drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
}
EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);
+static void action_drm_release_context(void *ptr)
+{
+ struct drm_modeset_acquire_ctx *ctx = ptr;
+
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+}
+
+/**
+ * drm_kunit_helper_context_alloc - Allocates an acquire context
+ * @test: The test context object
+ *
+ * Allocates and initializes a modeset acquire context.
+ *
+ * The context is tied to the kunit test context, so we must not call
+ * drm_modeset_acquire_fini() on it; it will be done automatically.
+ *
+ * Returns:
+ * An ERR_PTR on error, a pointer to the newly allocated context otherwise
+ */
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ctx);
+
+ drm_modeset_acquire_init(ctx, 0);
+
+ ret = kunit_add_action_or_reset(test,
+ action_drm_release_context,
+ ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_acquire_ctx_alloc);
+
+static void kunit_action_drm_atomic_state_put(void *ptr)
+{
+ struct drm_atomic_state *state = ptr;
+
+ drm_atomic_state_put(state);
+}
+
+/**
+ * drm_kunit_helper_atomic_state_alloc - Allocates an atomic state
+ * @test: The test context object
+ * @drm: The device to alloc the state for
+ * @ctx: Locking context for that atomic update
+ *
+ * Allocates an empty atomic state.
+ *
+ * The state is tied to the kunit test context, so we must not call
+ * drm_atomic_state_put() on it; it will be done automatically.
+ *
+ * Returns:
+ * An ERR_PTR on error, a pointer to the newly allocated state otherwise
+ */
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ int ret;
+
+ state = drm_atomic_state_alloc(drm);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_drm_atomic_state_put,
+ state);
+ if (ret)
+ return ERR_PTR(ret);
+
+ state->acquire_ctx = ctx;
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_atomic_state_alloc);
+
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
index bc4aa2ce78be..1e9f63fbfead 100644
--- a/drivers/gpu/drm/tests/drm_modes_test.c
+++ b/drivers/gpu/drm/tests/drm_modes_test.c
@@ -36,13 +36,6 @@ static int drm_test_modes_init(struct kunit *test)
return 0;
}
-static void drm_test_modes_exit(struct kunit *test)
-{
- struct drm_test_modes_priv *priv = test->priv;
-
- drm_kunit_helper_free_device(test, priv->dev);
-}
-
static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
{
struct drm_test_modes_priv *priv = test->priv;
@@ -148,7 +141,6 @@ static struct kunit_case drm_modes_analog_tv_tests[] = {
static struct kunit_suite drm_modes_analog_tv_test_suite = {
.name = "drm_modes_analog_tv",
.init = drm_test_modes_init,
- .exit = drm_test_modes_exit,
.test_cases = drm_modes_analog_tv_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
index 0ee65828623e..1a2044070a6c 100644
--- a/drivers/gpu/drm/tests/drm_probe_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
@@ -60,13 +60,6 @@ static int drm_probe_helper_test_init(struct kunit *test)
return 0;
}
-static void drm_probe_helper_test_exit(struct kunit *test)
-{
- struct drm_probe_helper_test_priv *priv = test->priv;
-
- drm_kunit_helper_free_device(test, priv->dev);
-}
-
typedef struct drm_display_mode *(*expected_mode_func_t)(struct drm_device *);
struct drm_connector_helper_tv_get_modes_test {
@@ -208,7 +201,6 @@ static struct kunit_case drm_test_connector_helper_tv_get_modes_tests[] = {
static struct kunit_suite drm_test_connector_helper_tv_get_modes_suite = {
.name = "drm_connector_helper_tv_get_modes",
.init = drm_probe_helper_test_init,
- .exit = drm_probe_helper_test_exit,
.test_cases = drm_test_connector_helper_tv_get_modes_tests,
};
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index dca077411f77..9d9dee7abaef 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -15,8 +15,6 @@
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -275,6 +273,55 @@ const struct dispc_features dispc_j721e_feats = {
.vid_order = { 1, 3, 0, 2 },
};
+const struct dispc_features dispc_am625_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ [DISPC_VP_INTERNAL] = 170000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM625,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+};
+
static const u16 *dispc_common_regmap;
struct dss_vp_data {
@@ -776,6 +823,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
switch (dispc->feat->subrev) {
case DISPC_K2G:
return dispc_k2g_read_and_clear_irqstatus(dispc);
+ case DISPC_AM625:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -791,6 +839,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
case DISPC_K2G:
dispc_k2g_set_irqenable(dispc, mask);
break;
+ case DISPC_AM625:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -1281,6 +1330,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
break;
+ case DISPC_AM625:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2199,6 +2249,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
case DISPC_K2G:
dispc_k2g_plane_init(dispc);
break;
+ case DISPC_AM625:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2305,6 +2356,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
case DISPC_K2G:
dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
break;
+ case DISPC_AM625:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2579,7 +2631,8 @@ int dispc_runtime_resume(struct dispc_device *dispc)
REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
- if (dispc->feat->subrev == DISPC_AM65X)
+ if (dispc->feat->subrev == DISPC_AM625 ||
+ dispc->feat->subrev == DISPC_AM65X)
dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 946ed769caaf..33ac5ad7a423 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -59,6 +59,7 @@ enum dispc_vp_bus_type {
enum dispc_dss_subrevision {
DISPC_K2G,
+ DISPC_AM625,
DISPC_AM65X,
DISPC_J721E,
};
@@ -86,6 +87,7 @@ struct dispc_features {
};
extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am625_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 3f5f27fb6ebc..4d063eb9cd0b 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -5,7 +5,7 @@
*/
#include <linux/console.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -197,7 +197,7 @@ err_runtime_suspend:
return ret;
}
-static int tidss_remove(struct platform_device *pdev)
+static void tidss_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tidss_device *tidss = platform_get_drvdata(pdev);
@@ -221,8 +221,6 @@ static int tidss_remove(struct platform_device *pdev)
dispc_remove(tidss);
dev_dbg(dev, "%s done\n", __func__);
-
- return 0;
}
static void tidss_shutdown(struct platform_device *pdev)
@@ -232,6 +230,7 @@ static void tidss_shutdown(struct platform_device *pdev)
static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+ { .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
@@ -241,7 +240,7 @@ MODULE_DEVICE_TABLE(of, tidss_of_table);
static struct platform_driver tidss_platform_driver = {
.probe = tidss_probe,
- .remove = tidss_remove,
+ .remove_new = tidss_remove,
.shutdown = tidss_shutdown,
.driver = {
.name = "tidss",
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 0d4865e9c03d..17a86bed8054 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -6,91 +6,125 @@
#include <linux/export.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tidss_crtc.h"
#include "tidss_drv.h"
#include "tidss_encoder.h"
-static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+struct tidss_encoder {
+ struct drm_bridge bridge;
+ struct drm_encoder encoder;
+ struct drm_connector *connector;
+ struct drm_bridge *next_bridge;
+ struct tidss_device *tidss;
+};
+
+static inline struct tidss_encoder
+*bridge_to_tidss_encoder(struct drm_bridge *b)
+{
+ return container_of(b, struct tidss_encoder, bridge);
+}
+
+static int tidss_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
+
+ return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+ bridge, flags);
+}
+
+static int tidss_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_device *ddev = encoder->dev;
+ struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
+ struct tidss_device *tidss = t_enc->tidss;
struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
- struct drm_bridge *bridge;
- bool bus_flags_set = false;
-
- dev_dbg(ddev->dev, "%s\n", __func__);
-
- /*
- * Take the bus_flags from the first bridge that defines
- * bridge timings, or from the connector's display_info if no
- * bridge defines the timings.
- */
- drm_for_each_bridge_in_chain(encoder, bridge) {
- if (!bridge->timings)
- continue;
-
- tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
- bus_flags_set = true;
- break;
- }
+ struct drm_bridge_state *next_bridge_state = NULL;
+
+ if (t_enc->next_bridge)
+ next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ t_enc->next_bridge);
- if (!di->bus_formats || di->num_bus_formats == 0) {
- dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+ if (next_bridge_state) {
+ tcrtc_state->bus_flags = next_bridge_state->input_bus_cfg.flags;
+ tcrtc_state->bus_format = next_bridge_state->input_bus_cfg.format;
+ } else if (di->num_bus_formats) {
+ tcrtc_state->bus_format = di->bus_formats[0];
+ tcrtc_state->bus_flags = di->bus_flags;
+ } else {
+ dev_err(tidss->dev, "%s: No bus_formats in connected display\n",
__func__);
return -EINVAL;
}
- // XXX any cleaner way to set bus format and flags?
- tcrtc_state->bus_format = di->bus_formats[0];
- if (!bus_flags_set)
- tcrtc_state->bus_flags = di->bus_flags;
-
return 0;
}
-static void tidss_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .atomic_check = tidss_encoder_atomic_check,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = tidss_encoder_destroy,
+static const struct drm_bridge_funcs tidss_bridge_funcs = {
+ .attach = tidss_bridge_attach,
+ .atomic_check = tidss_bridge_atomic_check,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
-struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
- u32 encoder_type, u32 possible_crtcs)
+int tidss_encoder_create(struct tidss_device *tidss,
+ struct drm_bridge *next_bridge,
+ u32 encoder_type, u32 possible_crtcs)
{
+ struct tidss_encoder *t_enc;
struct drm_encoder *enc;
+ struct drm_connector *connector;
int ret;
- enc = kzalloc(sizeof(*enc), GFP_KERNEL);
- if (!enc)
- return ERR_PTR(-ENOMEM);
+ t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder,
+ encoder, encoder_type);
+ if (IS_ERR(t_enc))
+ return PTR_ERR(t_enc);
+
+ t_enc->tidss = tidss;
+ t_enc->next_bridge = next_bridge;
+ t_enc->bridge.funcs = &tidss_bridge_funcs;
+ enc = &t_enc->encoder;
enc->possible_crtcs = possible_crtcs;
- ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
- encoder_type, NULL);
- if (ret < 0) {
- kfree(enc);
- return ERR_PTR(ret);
+ /* Attaching first bridge to the encoder */
+ ret = drm_bridge_attach(enc, &t_enc->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Initializing the connector at the end of bridge-chain */
+ connector = drm_bridge_connector_init(&tidss->ddev, enc);
+ if (IS_ERR(connector)) {
+ dev_err(tidss->dev, "bridge_connector create failed\n");
+ return PTR_ERR(connector);
+ }
+
+ ret = drm_connector_attach_encoder(connector, enc);
+ if (ret) {
+ dev_err(tidss->dev, "attaching encoder to connector failed\n");
+ return ret;
}
- drm_encoder_helper_add(enc, &encoder_helper_funcs);
+ t_enc->connector = connector;
dev_dbg(tidss->dev, "Encoder create done\n");
- return enc;
+ return ret;
}
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.h b/drivers/gpu/drm/tidss/tidss_encoder.h
index ace877c0e0fd..3e561d6b1e83 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.h
+++ b/drivers/gpu/drm/tidss/tidss_encoder.h
@@ -11,7 +11,8 @@
struct tidss_device;
-struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
- u32 encoder_type, u32 possible_crtcs);
+int tidss_encoder_create(struct tidss_device *tidss,
+ struct drm_bridge *next_bridge,
+ u32 encoder_type, u32 possible_crtcs);
#endif
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index ad2fa3c3d4a7..c979ad1af236 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -193,7 +193,6 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
for (i = 0; i < num_pipes; ++i) {
struct tidss_plane *tplane;
struct tidss_crtc *tcrtc;
- struct drm_encoder *enc;
u32 hw_plane_id = feat->vid_order[tidss->num_planes];
int ret;
@@ -216,16 +215,13 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
- enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+ ret = tidss_encoder_create(tidss, pipes[i].bridge,
+ pipes[i].enc_type,
1 << tcrtc->crtc.index);
- if (IS_ERR(enc)) {
+ if (ret) {
dev_err(tidss->dev, "encoder create failed\n");
- return PTR_ERR(enc);
- }
-
- ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
- if (ret)
return ret;
+ }
}
/* create overlay planes of the leftover planes */
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 6bdd6e4a955a..e1c0ef0c3894 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -38,7 +38,8 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->crtc) {
/*
* The visible field is not reset by the DRM core but only
- * updated by drm_plane_helper_check_state(), set it manually.
+ * updated by drm_atomic_helper_check_plane_state(), set it
+ * manually.
*/
new_plane_state->visible = false;
return 0;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 2729e16bc053..9aefd010acde 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -374,7 +374,7 @@ fail_backlight:
return ret;
}
-static int panel_remove(struct platform_device *pdev)
+static void panel_remove(struct platform_device *pdev)
{
struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
struct panel_module *panel_mod = to_panel_module(mod);
@@ -387,8 +387,6 @@ static int panel_remove(struct platform_device *pdev)
tilcdc_module_cleanup(mod);
kfree(panel_mod->info);
-
- return 0;
}
static const struct of_device_id panel_of_match[] = {
@@ -398,7 +396,7 @@ static const struct of_device_id panel_of_match[] = {
static struct platform_driver panel_driver = {
.probe = panel_probe,
- .remove = panel_remove,
+ .remove_new = panel_remove,
.driver = {
.name = "tilcdc-panel",
.of_match_table = panel_of_match,
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 077c6ff5a2e1..4ceb68ffac4b 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -316,19 +316,24 @@ static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
u32 speed_hz;
int ret;
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
+ spi_bus_unlock(spi->controller);
if (ret || !num)
return ret;
if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !dbi->swap_bytes)
bpw = 16;
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+ spi_bus_unlock(spi->controller);
- return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
+ return ret;
}
static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 02265c898816..938bceed5999 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -59,9 +59,11 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
* before being transferred as 8-bit on the big endian SPI bus.
*/
buf[0] = cpu_to_be16(*cmd);
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+ spi_bus_unlock(spi->controller);
if (ret || !num)
goto free;
@@ -79,9 +81,11 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
bpw = 16;
+ spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(mipi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
+ spi_bus_unlock(spi->controller);
free:
kfree(buf);
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 76cd7f515bab..2d999a0facde 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -1369,13 +1369,11 @@ static int ofdrm_probe(struct platform_device *pdev)
return 0;
}
-static int ofdrm_remove(struct platform_device *pdev)
+static void ofdrm_remove(struct platform_device *pdev)
{
struct drm_device *dev = platform_get_drvdata(pdev);
drm_dev_unplug(dev);
-
- return 0;
}
static const struct of_device_id ofdrm_of_match_display[] = {
@@ -1390,7 +1388,7 @@ static struct platform_driver ofdrm_platform_driver = {
.of_match_table = ofdrm_of_match_display,
},
.probe = ofdrm_probe,
- .remove = ofdrm_remove,
+ .remove_new = ofdrm_remove,
};
module_platform_driver(ofdrm_platform_driver);
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index eb9f13f18a02..f80a141fcf36 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -307,7 +307,8 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
- dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+ /* Multiple panels can share the "dc" GPIO, but only if they are on the same SPI bus! */
+ dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index c2677d081a7b..13ae148f59b9 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -533,7 +533,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
- buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
+ buf = kmalloc(fb->width * fb->height / 8, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out_exit;
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 25e11ef11c4c..ff86ba1ae1b8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -888,14 +888,12 @@ static int simpledrm_probe(struct platform_device *pdev)
return 0;
}
-static int simpledrm_remove(struct platform_device *pdev)
+static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
struct drm_device *dev = &sdev->dev;
drm_dev_unplug(dev);
-
- return 0;
}
static const struct of_device_id simpledrm_of_match_table[] = {
@@ -910,7 +908,7 @@ static struct platform_driver simpledrm_platform_driver = {
.of_match_table = simpledrm_of_match_table,
},
.probe = simpledrm_probe,
- .remove = simpledrm_remove,
+ .remove_new = simpledrm_remove,
};
module_platform_driver(simpledrm_platform_driver);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f906b22959cf..dad298127226 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
new file mode 100644
index 000000000000..75fdce0cd98e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_KUNIT_TEST_HELPERS=y
+CONFIG_DRM_TTM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
new file mode 100644
index 000000000000..ec87c4fc1ad5
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 AND MIT
+
+obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
+ ttm_device_test.o \
+ ttm_pool_test.o \
+ ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
new file mode 100644
index 000000000000..b1b423b68cdf
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_test_case {
+ const char *description;
+ bool use_dma_alloc;
+ bool use_dma32;
+ bool pools_init_expected;
+};
+
+static void ttm_device_init_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *ttm_sys_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+
+ ttm_sys_man = &ttm_dev->sysman;
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
+ KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+
+ ttm_device_fini(ttm_dev);
+}
+
+static void ttm_device_init_multiple(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_devs;
+ unsigned int i, num_dev = 3;
+ int err;
+
+ ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
+
+ for (i = 0; i < num_dev; i++) {
+ err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
+ priv->drm->anon_inode->i_mapping);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
+ KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
+ }
+
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
+
+ for (i = 0; i < num_dev; i++)
+ ttm_device_fini(&ttm_devs[i]);
+}
+
+static void ttm_device_fini_basic(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_device *ttm_dev;
+ struct ttm_resource_manager *man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
+ KUNIT_ASSERT_NOT_NULL(test, man);
+
+ ttm_device_fini(ttm_dev);
+
+ KUNIT_ASSERT_FALSE(test, man->use_type);
+ KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
+ KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
+}
+
+static void ttm_device_init_no_vma_man(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct ttm_device *ttm_dev;
+ struct drm_vma_offset_manager *vma_man;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ /* Let's pretend there's no VMA manager allocated */
+ vma_man = drm->vma_offset_manager;
+ drm->vma_offset_manager = NULL;
+
+ err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ /* Bring the manager back for a graceful cleanup */
+ drm->vma_offset_manager = vma_man;
+}
+
+static const struct ttm_device_test_case ttm_device_cases[] = {
+ {
+ .description = "No DMA allocations, no DMA32 required",
+ .use_dma_alloc = false,
+ .use_dma32 = false,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, DMA32 required",
+ .use_dma_alloc = true,
+ .use_dma32 = true,
+ .pools_init_expected = true,
+ },
+ {
+ .description = "No DMA allocations, DMA32 required",
+ .use_dma_alloc = false,
+ .use_dma32 = true,
+ .pools_init_expected = false,
+ },
+ {
+ .description = "DMA allocations, no DMA32 required",
+ .use_dma_alloc = true,
+ .use_dma32 = false,
+ .pools_init_expected = true,
+ },
+};
+
+static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
+
+static void ttm_device_init_pools(struct kunit *test)
+{
+ struct ttm_test_devices *priv = test->priv;
+ const struct ttm_device_test_case *params = test->param_value;
+ struct ttm_device *ttm_dev;
+ struct ttm_pool *pool;
+ struct ttm_pool_type pt;
+ int err;
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(priv, ttm_dev,
+ params->use_dma_alloc,
+ params->use_dma32);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = &ttm_dev->pool;
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+ KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
+ KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+ KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+
+ if (params->pools_init_expected) {
+ for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (int j = 0; j <= MAX_ORDER; ++j) {
+ pt = pool->caching[i].orders[j];
+ KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
+ KUNIT_EXPECT_EQ(test, pt.caching, i);
+ KUNIT_EXPECT_EQ(test, pt.order, j);
+
+ if (params->use_dma_alloc)
+ KUNIT_ASSERT_FALSE(test,
+ list_empty(&pt.pages));
+ }
+ }
+ }
+
+ ttm_device_fini(ttm_dev);
+}
+
+static struct kunit_case ttm_device_test_cases[] = {
+ KUNIT_CASE(ttm_device_init_basic),
+ KUNIT_CASE(ttm_device_init_multiple),
+ KUNIT_CASE(ttm_device_fini_basic),
+ KUNIT_CASE(ttm_device_init_no_vma_man),
+ KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
+ {}
+};
+
+static struct kunit_suite ttm_device_test_suite = {
+ .name = "ttm_device",
+ .init = ttm_test_devices_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_device_test_cases,
+};
+
+kunit_test_suites(&ttm_device_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
new file mode 100644
index 000000000000..81661d8827aa
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include "ttm_kunit_helpers.h"
+
+struct ttm_device_funcs ttm_dev_funcs = {
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs);
+
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32)
+{
+ struct drm_device *drm = priv->drm;
+ int err;
+
+ err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev,
+ drm->anon_inode->i_mapping,
+ drm->vma_offset_manager,
+ use_dma_alloc, use_dma32);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size)
+{
+ struct drm_gem_object gem_obj = { .size = size };
+ struct ttm_buffer_object *bo;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ bo->base = gem_obj;
+ bo->bdev = devs->ttm_dev;
+
+ return bo;
+}
+EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+
+ devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, devs);
+
+ devs->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+
+ devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
+ sizeof(*devs->drm), 0,
+ DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
+
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
+{
+ struct ttm_test_devices *devs;
+ struct ttm_device *ttm_dev;
+ int err;
+
+ devs = ttm_test_devices_basic(test);
+
+ ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+
+ err = ttm_device_kunit_init(devs, ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ devs->ttm_dev = ttm_dev;
+
+ return devs;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
+{
+ if (devs->ttm_dev)
+ ttm_device_fini(devs->ttm_dev);
+
+ drm_kunit_helper_free_device(test, devs->dev);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_put);
+
+int ttm_test_devices_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+
+void ttm_test_devices_fini(struct kunit *test)
+{
+ ttm_test_devices_put(test, test->priv);
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
new file mode 100644
index 000000000000..e261e3660d0b
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_KUNIT_HELPERS_H
+#define TTM_KUNIT_HELPERS_H
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_bo.h>
+
+#include <drm/drm_kunit_helpers.h>
+#include <kunit/test.h>
+
+extern struct ttm_device_funcs ttm_dev_funcs;
+
+struct ttm_test_devices {
+ struct drm_device *drm;
+ struct device *dev;
+ struct ttm_device *ttm_dev;
+};
+
+/* Building blocks for test-specific init functions */
+int ttm_device_kunit_init(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32);
+struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
+ struct ttm_test_devices *devs,
+ size_t size);
+
+struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
+struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
+
+void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
+
+/* Generic init/fini for tests that only need DRM/TTM devices */
+int ttm_test_devices_init(struct kunit *test);
+void ttm_test_devices_fini(struct kunit *test);
+
+#endif // TTM_KUNIT_HELPERS_H
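A sketch of how a test built on these helpers might look, with a hypothetical test case that is not part of this patch: ttm_test_devices_all() brings up the device/DRM/TTM trio and ttm_test_devices_put() tears it down.

#include <kunit/test.h>

#include "ttm_kunit_helpers.h"

static void ttm_example_mock_bo(struct kunit *test)
{
	struct ttm_test_devices *devs;
	struct ttm_buffer_object *bo;

	/* Allocates the struct device, the DRM device and a TTM device. */
	devs = ttm_test_devices_all(test);

	/* Mock BO of one page, backed by the TTM device created above. */
	bo = ttm_bo_kunit_init(test, devs, PAGE_SIZE);
	KUNIT_EXPECT_EQ(test, bo->base.size, PAGE_SIZE);
	KUNIT_EXPECT_PTR_EQ(test, bo->bdev, devs->ttm_dev);

	/* Finalizes the TTM device and frees the KUnit-managed device. */
	ttm_test_devices_put(test, devs);
}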
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
new file mode 100644
index 000000000000..8d90870fb199
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_pool.h>
+
+#include "ttm_kunit_helpers.h"
+
+struct ttm_pool_test_case {
+ const char *description;
+ unsigned int order;
+ bool use_dma_alloc;
+};
+
+struct ttm_pool_test_priv {
+ struct ttm_test_devices *devs;
+
+ /* Used to create mock ttm_tts */
+ struct ttm_buffer_object *mock_bo;
+};
+
+static struct ttm_operation_ctx simple_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+};
+
+static int ttm_pool_test_init(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->devs = ttm_test_devices_basic(test);
+ test->priv = priv;
+
+ return 0;
+}
+
+static void ttm_pool_test_fini(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+
+ ttm_test_devices_put(test, priv->devs);
+}
+
+static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
+ uint32_t page_flags,
+ enum ttm_caching caching,
+ size_t size)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, priv->devs, size);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+ priv->mock_bo = bo;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ return tt;
+}
+
+static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
+ size_t size,
+ enum ttm_caching caching)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_pool *pool;
+ struct ttm_tt *tt;
+ unsigned long order = __fls(size / PAGE_SIZE);
+ int err;
+
+ tt = ttm_tt_kunit_init(test, order, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ return pool;
+}
+
+static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
+ {
+ .description = "One page",
+ .order = 0,
+ },
+ {
+ .description = "More than one page",
+ .order = 2,
+ },
+ {
+ .description = "Above the allocation limit",
+ .order = MAX_ORDER + 1,
+ },
+ {
+ .description = "One page, with coherent DMA mappings enabled",
+ .order = 0,
+ .use_dma_alloc = true,
+ },
+ {
+ .description = "Above the allocation limit, with coherent DMA mappings enabled",
+ .order = MAX_ORDER + 1,
+ .use_dma_alloc = true,
+ },
+};
+
+static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
+ ttm_pool_alloc_case_desc);
+
+static void ttm_pool_alloc_basic(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct page *fst_page, *last_page;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
+ false);
+
+ KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
+ KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
+ KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ fst_page = tt->pages[0];
+ last_page = tt->pages[tt->num_pages - 1];
+
+ if (params->order <= MAX_ORDER) {
+ if (params->use_dma_alloc) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
+ } else {
+ KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
+ }
+ } else {
+ if (params->use_dma_alloc) {
+ KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
+ KUNIT_ASSERT_NULL(test, (void *)last_page->private);
+ } else {
+ /*
+ * We expect to allocate one big block, followed by
+ * order-0 blocks.
+ */
+ KUNIT_ASSERT_EQ(test, fst_page->private,
+ min_t(unsigned int, MAX_ORDER,
+ params->order));
+ KUNIT_ASSERT_EQ(test, last_page->private, 0);
+ }
+ }
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ const struct ttm_pool_test_case *params = test->param_value;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_buffer_object *bo;
+ dma_addr_t dma1, dma2;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int expected_num_pages = 1 << params->order;
+ size_t size = expected_num_pages * PAGE_SIZE;
+ int err;
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ bo = ttm_bo_kunit_init(test, devs, size);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ err = ttm_sg_tt_init(tt, bo, 0, caching);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
+
+ dma1 = tt->dma_address[0];
+ dma2 = tt->dma_address[tt->num_pages - 1];
+
+ KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
+ KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_caching_match(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching tt_caching = ttm_uncached;
+ enum ttm_caching pool_caching = ttm_cached;
+ size_t size = PAGE_SIZE;
+ unsigned int order = 0;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, size, pool_caching);
+
+ pt_pool = &pool->caching[pool_caching].orders[order];
+ pt_tt = &pool->caching[tt_caching].orders[order];
+
+ tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_alloc_order_mismatch(struct kunit *test)
+{
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt_pool, *pt_tt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t fst_size = (1 << order) * PAGE_SIZE;
+ size_t snd_size = PAGE_SIZE;
+ int err;
+
+ pool = ttm_pool_pre_populated(test, fst_size, caching);
+
+ pt_pool = &pool->caching[caching].orders[order];
+ pt_tt = &pool->caching[caching].orders[0];
+
+ tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+
+ err = ttm_pool_alloc(pool, tt, &simple_ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_free_no_dma_alloc(struct kunit *test)
+{
+ struct ttm_pool_test_priv *priv = test->priv;
+ struct ttm_test_devices *devs = priv->devs;
+ struct ttm_tt *tt;
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 2;
+ size_t size = (1 << order) * PAGE_SIZE;
+
+ tt = ttm_tt_kunit_init(test, 0, caching, size);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, pool);
+
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+ ttm_pool_alloc(pool, tt, &simple_ctx);
+
+ pt = &pool->caching[caching].orders[order];
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_free(pool, tt);
+ ttm_tt_fini(tt);
+
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+
+ ttm_pool_fini(pool);
+}
+
+static void ttm_pool_fini_basic(struct kunit *test)
+{
+ struct ttm_pool *pool;
+ struct ttm_pool_type *pt;
+ enum ttm_caching caching = ttm_uncached;
+ unsigned int order = 0;
+ size_t size = PAGE_SIZE;
+
+ pool = ttm_pool_pre_populated(test, size, caching);
+ pt = &pool->caching[caching].orders[order];
+
+ KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+
+ ttm_pool_fini(pool);
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+}
+
+static struct kunit_case ttm_pool_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
+ ttm_pool_alloc_basic_gen_params),
+ KUNIT_CASE(ttm_pool_alloc_order_caching_match),
+ KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
+ KUNIT_CASE(ttm_pool_alloc_order_mismatch),
+ KUNIT_CASE(ttm_pool_free_dma_alloc),
+ KUNIT_CASE(ttm_pool_free_no_dma_alloc),
+ KUNIT_CASE(ttm_pool_fini_basic),
+ {}
+};
+
+static struct kunit_suite ttm_pool_test_suite = {
+ .name = "ttm_pool",
+ .init = ttm_pool_test_init,
+ .exit = ttm_pool_test_fini,
+ .test_cases = ttm_pool_test_cases,
+};
+
+kunit_test_suites(&ttm_pool_test_suite);
+
+MODULE_LICENSE("GPL");
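In the pre-population helper above the pool order is derived from the buffer size via __fls(size / PAGE_SIZE): 1 page maps to order 0, 4 pages to order 2, and a non-power-of-two count such as 6 pages also maps to order 2. A tiny illustrative helper, not part of this patch, assuming size is at least one page:

#include <linux/bitops.h>
#include <linux/mm.h>

/* Highest order whose block fits entirely in the given size. */
static unsigned int example_order_for_size(size_t size)
{
	return __fls(size / PAGE_SIZE);
}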
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bd5dae4d1624..e58b7e249816 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -345,6 +345,7 @@ static void ttm_bo_release(struct kref *kref)
if (!dma_resv_test_signaled(bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP) ||
+ (want_init_on_free() && (bo->ttm != NULL)) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -458,18 +459,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
-bounce:
- ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (ret == -EMULTIHOP) {
+ do {
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
+ if (ret != -EMULTIHOP)
+ break;
+
ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EINTR)
- pr_err("Buffer eviction failed\n");
- ttm_resource_free(bo, &evict_mem);
- goto out;
- }
- /* try and move to final place now. */
- goto bounce;
+ } while (!ret);
+
+ if (ret) {
+ ttm_resource_free(bo, &evict_mem);
+ if (ret != -ERESTARTSYS && ret != -EINTR)
+ pr_err("Buffer eviction failed\n");
}
out:
return ret;
@@ -517,6 +518,13 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
+ if (bo->pin_count) {
+ *locked = false;
+ if (busy)
+ *busy = false;
+ return false;
+ }
+
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
if (ctx->allow_res_evict)
@@ -1154,7 +1162,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Move to system cached
*/
if (bo->resource->mem_type != TTM_PL_SYSTEM) {
- struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource *evict_mem;
struct ttm_place hop;
@@ -1164,9 +1171,10 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (unlikely(ret))
goto out;
- ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
+ ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (unlikely(ret != 0)) {
 			WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
+ ttm_resource_free(bo, &evict_mem);
goto out;
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index df4cf5468e7f..7726a72befc5 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -213,7 +213,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
+ ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 4db3982057be..cddb9151d20f 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -93,7 +93,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
__GFP_KSWAPD_RECLAIM;
if (!pool->use_dma_alloc) {
- p = alloc_pages(gfp_flags, order);
+ p = alloc_pages_node(pool->nid, gfp_flags, order);
if (p)
p->private = order;
return p;
@@ -287,7 +287,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc)
+ if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
@@ -545,29 +545,32 @@ EXPORT_SYMBOL(ttm_pool_free);
*
* @pool: the pool to initialize
* @dev: device for DMA allocations and mappings
+ * @nid: NUMA node to use for allocations
* @use_dma_alloc: true if coherent DMA alloc should be used
* @use_dma32: true if GFP_DMA32 should be used
*
* Initialize the pool and its pool types.
*/
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
- bool use_dma_alloc, bool use_dma32)
+ int nid, bool use_dma_alloc, bool use_dma32)
{
unsigned int i, j;
WARN_ON(!dev && use_dma_alloc);
pool->dev = dev;
+ pool->nid = nid;
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc) {
+ if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
}
+EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -581,7 +584,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc) {
+ if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
@@ -592,6 +595,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
*/
synchronize_shrinkers();
}
+EXPORT_SYMBOL(ttm_pool_fini);
/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
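Call sites pick up the extra NUMA-node argument to ttm_pool_init() by passing NUMA_NO_NODE, which keeps the previous placement behaviour, as the ttm_device.c hunk above does. A hedged sketch of an updated call site for a hypothetical driver-private pool:

#include <linux/numa.h>

#include <drm/ttm/ttm_pool.h>

/* Hypothetical driver pool set-up after this interface change. */
static void example_pool_setup(struct ttm_pool *pool, struct device *dev)
{
	/* NUMA_NO_NODE means no node preference, i.e. the old behaviour. */
	ttm_pool_init(pool, dev, NUMA_NO_NODE, true, false);
}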
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7333f7a87a2f..46ff9c75bb12 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -86,6 +86,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
+ if (pos->first == res)
+ pos->first = list_next_entry(res, lru);
list_move(&res->lru, &pos->last->lru);
pos->last = res;
}
@@ -111,7 +113,8 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
{
struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
- if (unlikely(pos->first == res && pos->last == res)) {
+ if (unlikely(WARN_ON(!pos->first || !pos->last) ||
+ (pos->first == res && pos->last == res))) {
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 1ce4b36ab33b..e0a77671edd6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -449,3 +449,9 @@ ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+
+unsigned long ttm_tt_pages_limit(void)
+{
+ return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 40b1168ad671..0bb56d063536 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -236,7 +236,7 @@ dev_unref:
return ret;
}
-static int tve200_remove(struct platform_device *pdev)
+static void tve200_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct tve200_drm_dev_private *priv = drm->dev_private;
@@ -247,8 +247,6 @@ static int tve200_remove(struct platform_device *pdev)
drm_mode_config_cleanup(drm);
clk_disable_unprepare(priv->pclk);
drm_dev_put(drm);
-
- return 0;
}
static const struct of_device_id tve200_of_match[] = {
@@ -261,10 +259,10 @@ static const struct of_device_id tve200_of_match[] = {
static struct platform_driver tve200_driver = {
.driver = {
.name = "tve200",
- .of_match_table = of_match_ptr(tve200_of_match),
+ .of_match_table = tve200_of_match,
},
.probe = tve200_probe,
- .remove = tve200_remove,
+ .remove_new = tve200_remove,
};
drm_module_platform_driver(tve200_driver);
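The tve200 hunk above is part of the tree-wide move to the void-returning platform remove callback; the same conversion repeats for the vc4 and v3d drivers below. A minimal sketch of the resulting shape for a hypothetical driver, names illustrative only:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/*
 * .remove_new cannot return an error, so any cleanup failure has to be
 * handled (or at least logged) here rather than reported to the core.
 */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);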
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index aa02fd2789c3..40876bcdd79a 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -310,16 +311,6 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = {
* CRTC
*/
-static int udl_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
-{
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
-
- if (!new_crtc_state->enable)
- return 0;
-
- return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
-}
-
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
@@ -381,7 +372,7 @@ out:
}
static const struct drm_crtc_helper_funcs udl_crtc_helper_funcs = {
- .atomic_check = udl_crtc_helper_atomic_check,
+ .atomic_check = drm_crtc_helper_atomic_check,
.atomic_enable = udl_crtc_helper_atomic_enable,
.atomic_disable = udl_crtc_helper_atomic_disable,
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 478f1f0f60de..ffbbe9d527d3 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -171,10 +171,7 @@ static const struct drm_driver v3d_drm_driver = {
#endif
.gem_create_object = v3d_create_object,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -295,7 +292,7 @@ dma_free:
return ret;
}
-static int v3d_platform_drm_remove(struct platform_device *pdev)
+static void v3d_platform_drm_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct v3d_dev *v3d = to_v3d_dev(drm);
@@ -306,13 +303,11 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
-
- return 0;
}
static struct platform_driver v3d_platform_driver = {
.probe = v3d_platform_drm_probe,
- .remove = v3d_platform_drm_remove,
+ .remove_new = v3d_platform_drm_remove,
.driver = {
.name = "v3d",
.of_match_table = v3d_of_match,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index b74b1351bfc8..7f664a4b2a75 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -340,7 +340,7 @@ struct v3d_submit_ext {
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
/* nsecs_to_jiffies64() does not guard against overflow */
- if (NSEC_PER_SEC % HZ &&
+ if ((NSEC_PER_SEC % HZ) != 0 &&
div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
return MAX_JIFFY_OFFSET;
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
index a4bed26af32f..63ca46f4cb35 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -153,6 +153,13 @@ static int __build_mock(struct kunit *test, struct drm_device *drm,
return 0;
}
+static void kunit_action_drm_dev_unregister(void *ptr)
+{
+ struct drm_device *drm = ptr;
+
+ drm_dev_unregister(drm);
+}
+
static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
{
struct drm_device *drm;
@@ -186,6 +193,11 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
ret = drm_dev_register(drm, 0);
KUNIT_ASSERT_EQ(test, ret, 0);
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_drm_dev_unregister,
+ drm);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
return vc4;
}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index ae0bd0f81698..61622e951031 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -20,7 +20,6 @@
struct pv_muxing_priv {
struct vc4_dev *vc4;
- struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
};
@@ -725,7 +724,7 @@ static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx *ctx;
struct pv_muxing_priv *priv;
struct drm_device *drm;
struct vc4_dev *vc4;
@@ -738,33 +737,16 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- drm_modeset_acquire_init(&priv->ctx, 0);
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
drm = &vc4->base;
- state = drm_atomic_state_alloc(drm);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
- state->acquire_ctx = &priv->ctx;
-
- priv->state = state;
+ priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
return 0;
}
-static void vc4_pv_muxing_test_exit(struct kunit *test)
-{
- struct pv_muxing_priv *priv = test->priv;
- struct vc4_dev *vc4 = priv->vc4;
- struct drm_device *drm = &vc4->base;
- struct drm_atomic_state *state = priv->state;
-
- drm_atomic_state_put(state);
- drm_modeset_drop_locks(&priv->ctx);
- drm_modeset_acquire_fini(&priv->ctx);
- drm_dev_unregister(drm);
- drm_kunit_helper_free_device(test, vc4->dev);
-}
-
static struct kunit_case vc4_pv_muxing_tests[] = {
KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
vc4_test_pv_muxing_gen_params),
@@ -776,7 +758,6 @@ static struct kunit_case vc4_pv_muxing_tests[] = {
static struct kunit_suite vc4_pv_muxing_test_suite = {
.name = "vc4-pv-muxing-combinations",
.init = vc4_pv_muxing_test_init,
- .exit = vc4_pv_muxing_test_exit,
.test_cases = vc4_pv_muxing_tests,
};
@@ -791,7 +772,6 @@ static struct kunit_case vc5_pv_muxing_tests[] = {
static struct kunit_suite vc5_pv_muxing_test_suite = {
.name = "vc5-pv-muxing-combinations",
.init = vc4_pv_muxing_test_init,
- .exit = vc4_pv_muxing_test_exit,
.test_cases = vc5_pv_muxing_tests,
};
@@ -802,7 +782,7 @@ static struct kunit_suite vc5_pv_muxing_test_suite = {
*/
static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
{
- struct drm_modeset_acquire_ctx ctx;
+ struct drm_modeset_acquire_ctx *ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct vc4_hvs_state *new_hvs_state;
@@ -815,14 +795,13 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- drm_modeset_acquire_init(&ctx, 0);
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
drm = &vc4->base;
- state = drm_atomic_state_alloc(drm);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
- state->acquire_ctx = &ctx;
-
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -843,13 +822,9 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- drm_atomic_state_put(state);
-
- state = drm_atomic_state_alloc(drm);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
- state->acquire_ctx = &ctx;
-
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -868,17 +843,18 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
-
- drm_atomic_state_put(state);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- drm_dev_unregister(drm);
- drm_kunit_helper_free_device(test, vc4->dev);
}
+/*
+ * This test makes sure that we never change the FIFO of an active HVS
+ * channel if we disable a FIFO with a lower index.
+ *
+ * Doing so would result in a FIFO stall and would disrupt an output
+ * that is supposed to be unaffected by the commit.
+ */
static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
{
- struct drm_modeset_acquire_ctx ctx;
+ struct drm_modeset_acquire_ctx *ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct vc4_hvs_state *new_hvs_state;
@@ -891,14 +867,13 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- drm_modeset_acquire_init(&ctx, 0);
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
drm = &vc4->base;
- state = drm_atomic_state_alloc(drm);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
- state->acquire_ctx = &ctx;
-
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -930,13 +905,9 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- drm_atomic_state_put(state);
-
- state = drm_atomic_state_alloc(drm);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
- state->acquire_ctx = &ctx;
-
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -958,18 +929,27 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
}
-
- drm_atomic_state_put(state);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- drm_dev_unregister(drm);
- drm_kunit_helper_free_device(test, vc4->dev);
}
+/*
+ * Test that if we affect a single output, only the CRTC state of that
+ * output will be pulled into the global atomic state.
+ *
+ * This is relevant for two things:
+ *
+ * - If we don't have that state at all, we are unlikely to affect the
+ *   FIFO muxing. This is somewhat redundant with
+ *   drm_test_vc5_pv_muxing_bugs_stable_fifo().
+ *
+ * - KMS waits for page flips to occur on all the CRTCs found in the
+ *   CRTC state. Since that CRTC is unaffected, we would over-wait and,
+ *   more importantly, run into corner cases such as waiting on an
+ *   inactive CRTC that never completes.
+ */
static void
drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
{
- struct drm_modeset_acquire_ctx ctx;
+ struct drm_modeset_acquire_ctx *ctx;
struct drm_atomic_state *state;
struct vc4_crtc_state *new_vc4_crtc_state;
struct drm_device *drm;
@@ -979,14 +959,13 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
vc4 = vc5_mock_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
- drm_modeset_acquire_init(&ctx, 0);
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
drm = &vc4->base;
- state = drm_atomic_state_alloc(drm);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
- state->acquire_ctx = &ctx;
-
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -996,13 +975,9 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
- drm_atomic_state_put(state);
-
- state = drm_atomic_state_alloc(drm);
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
- state->acquire_ctx = &ctx;
-
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1012,12 +987,6 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
VC4_ENCODER_TYPE_HDMI0);
KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
-
- drm_atomic_state_put(state);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- drm_dev_unregister(drm);
- drm_kunit_helper_free_device(test, vc4->dev);
}
static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
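The conversions above lean on the managed DRM KUnit helpers, which tie the acquire context and atomic state to the lifetime of the test, so the explicit exit callbacks can be dropped. A condensed sketch of the pattern, as a hypothetical helper assuming a drm pointer obtained from a mock device:

#include <kunit/test.h>

#include <drm/drm_atomic.h>
#include <drm/drm_kunit_helpers.h>

static struct drm_atomic_state *
example_state_alloc(struct kunit *test, struct drm_device *drm)
{
	struct drm_modeset_acquire_ctx *ctx;
	struct drm_atomic_state *state;

	/*
	 * Both objects are released automatically when the test finishes,
	 * so no drm_atomic_state_put()/drm_modeset_acquire_fini() needed.
	 */
	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	return state;
}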
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index bef9d45ef1df..8b5a7e5eb146 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -31,7 +31,8 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
@@ -1450,15 +1451,14 @@ static int vc4_crtc_dev_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_crtc_ops);
}
-static int vc4_crtc_dev_remove(struct platform_device *pdev)
+static void vc4_crtc_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_crtc_ops);
- return 0;
}
struct platform_driver vc4_crtc_driver = {
.probe = vc4_crtc_dev_probe,
- .remove = vc4_crtc_dev_remove,
+ .remove_new = vc4_crtc_dev_remove,
.driver = {
.name = "vc4_crtc",
.of_match_table = vc4_crtc_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index e68c07d86040..39152e755a13 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -22,8 +22,8 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/media-bus-format.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -388,15 +388,14 @@ static int vc4_dpi_dev_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_dpi_ops);
}
-static int vc4_dpi_dev_remove(struct platform_device *pdev)
+static void vc4_dpi_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_dpi_ops);
- return 0;
}
struct platform_driver vc4_dpi_driver = {
.probe = vc4_dpi_dev_probe,
- .remove = vc4_dpi_dev_remove,
+ .remove_new = vc4_dpi_dev_remove,
.driver = {
.name = "vc4_dpi",
.of_match_table = vc4_dpi_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 823395c23cc3..1b3531374967 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -439,11 +439,9 @@ static int vc4_platform_drm_probe(struct platform_device *pdev)
return component_master_add_with_match(dev, &vc4_drm_ops, match);
}
-static int vc4_platform_drm_remove(struct platform_device *pdev)
+static void vc4_platform_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &vc4_drm_ops);
-
- return 0;
}
static const struct of_device_id vc4_of_match[] = {
@@ -456,7 +454,7 @@ MODULE_DEVICE_TABLE(of, vc4_of_match);
static struct platform_driver vc4_platform_driver = {
.probe = vc4_platform_drm_probe,
- .remove = vc4_platform_drm_remove,
+ .remove_new = vc4_platform_drm_remove,
.driver = {
.name = "vc4-drm",
.of_match_table = vc4_of_match,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 9e0c355b236f..46f6c4ce61c5 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -25,8 +25,9 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
@@ -1825,20 +1826,18 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
return 0;
}
-static int vc4_dsi_dev_remove(struct platform_device *pdev)
+static void vc4_dsi_dev_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
vc4_dsi_put(dsi);
-
- return 0;
}
struct platform_driver vc4_dsi_driver = {
.probe = vc4_dsi_dev_probe,
- .remove = vc4_dsi_dev_remove,
+ .remove_new = vc4_dsi_dev_remove,
.driver = {
.name = "vc4_dsi",
.of_match_table = vc4_dsi_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6da41ea1250a..a488625773dc 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -41,8 +41,8 @@
#include <linux/component.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/rational.h>
#include <linux/reset.h>
@@ -742,7 +742,7 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
- ret = drm_mode_create_hdmi_colorspace_property(connector);
+ ret = drm_mode_create_hdmi_colorspace_property(connector, 0);
if (ret)
return ret;
@@ -3770,10 +3770,9 @@ static int vc4_hdmi_dev_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_hdmi_ops);
}
-static int vc4_hdmi_dev_remove(struct platform_device *pdev)
+static void vc4_hdmi_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_hdmi_ops);
- return 0;
}
static const struct vc4_hdmi_variant bcm2835_variant = {
@@ -3869,7 +3868,7 @@ static const struct dev_pm_ops vc4_hdmi_pm_ops = {
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
- .remove = vc4_hdmi_dev_remove,
+ .remove_new = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 4da66ef96783..04af672caacb 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -1061,10 +1061,9 @@ static int vc4_hvs_dev_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_hvs_ops);
}
-static int vc4_hvs_dev_remove(struct platform_device *pdev)
+static void vc4_hvs_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_hvs_ops);
- return 0;
}
static const struct of_device_id vc4_hvs_dt_match[] = {
@@ -1075,7 +1074,7 @@ static const struct of_device_id vc4_hvs_dt_match[] = {
struct platform_driver vc4_hvs_driver = {
.probe = vc4_hvs_dev_probe,
- .remove = vc4_hvs_dev_remove,
+ .remove_new = vc4_hvs_dev_remove,
.driver = {
.name = "vc4_hvs",
.of_match_table = vc4_hvs_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index c5abdec03103..ffe1f7d1b911 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -9,8 +9,8 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic.h>
@@ -573,10 +573,9 @@ static int vc4_txp_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_txp_ops);
}
-static int vc4_txp_remove(struct platform_device *pdev)
+static void vc4_txp_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_txp_ops);
- return 0;
}
static const struct of_device_id vc4_txp_dt_match[] = {
@@ -586,7 +585,7 @@ static const struct of_device_id vc4_txp_dt_match[] = {
struct platform_driver vc4_txp_driver = {
.probe = vc4_txp_probe,
- .remove = vc4_txp_remove,
+ .remove_new = vc4_txp_remove,
.driver = {
.name = "vc4_txp",
.of_match_table = vc4_txp_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 29a664c8bf44..04ac7805e6d5 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -532,10 +532,9 @@ static int vc4_v3d_dev_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_v3d_ops);
}
-static int vc4_v3d_dev_remove(struct platform_device *pdev)
+static void vc4_v3d_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_v3d_ops);
- return 0;
}
const struct of_device_id vc4_v3d_dt_match[] = {
@@ -547,7 +546,7 @@ const struct of_device_id vc4_v3d_dt_match[] = {
struct platform_driver vc4_v3d_driver = {
.probe = vc4_v3d_dev_probe,
- .remove = vc4_v3d_dev_remove,
+ .remove_new = vc4_v3d_dev_remove,
.driver = {
.name = "vc4_v3d",
.of_match_table = vc4_v3d_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index d6e6a1a22eba..268f18b10ee0 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -21,8 +21,8 @@
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "vc4_drv.h"
@@ -812,15 +812,14 @@ static int vc4_vec_dev_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &vc4_vec_ops);
}
-static int vc4_vec_dev_remove(struct platform_device *pdev)
+static void vc4_vec_dev_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &vc4_vec_ops);
- return 0;
}
struct platform_driver vc4_vec_driver = {
.probe = vc4_vec_dev_probe,
- .remove = vc4_vec_dev_remove,
+ .remove_new = vc4_vec_dev_remove,
.driver = {
.name = "vc4_vec",
.of_match_table = vc4_vec_dt_match,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index add075681e18..644b8ee51009 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -176,7 +176,8 @@ static const struct drm_driver driver = {
* If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
* out via drm_device::driver_features:
*/
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
@@ -186,9 +187,6 @@ static const struct drm_driver driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
#endif
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index cf3c04b16a7a..3c00135ead45 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -14,11 +14,24 @@
#include <linux/uaccess.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
+struct virtio_gpu_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
struct virtio_gpu_submit {
+ struct virtio_gpu_submit_post_dep *post_deps;
+ unsigned int num_out_syncobjs;
+
+ struct drm_syncobj **in_syncobjs;
+ unsigned int num_in_syncobjs;
+
struct virtio_gpu_object_array *buflist;
struct drm_virtgpu_execbuffer *exbuf;
struct virtio_gpu_fence *out_fence;
@@ -59,18 +72,211 @@ static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
return 0;
}
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ if (syncobjs[i])
+ drm_syncobj_put(syncobjs[i]);
+ }
+
+ kvfree(syncobjs);
+}
+
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+ struct drm_syncobj **syncobjs;
+ int ret = 0, i;
+
+ if (!num_in_syncobjs)
+ return 0;
+
+ /*
+ * kvcalloc() first tries to allocate memory using kmalloc() and
+ * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
+ * internally for allocations larger than a page, preventing a
+ * storm of kmsg warnings.
+ */
+ syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+ if (!syncobjs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_in_syncobjs; i++) {
+ u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_syncobjs(syncobjs, i);
+ return ret;
+ }
+
+ submit->num_in_syncobjs = num_in_syncobjs;
+ submit->in_syncobjs = syncobjs;
+
+ return ret;
+}
+
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i;
+
+ for (i = 0; i < nr_syncobjs; i++) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+
+ kvfree(post_deps);
+}
+
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ struct virtio_gpu_submit_post_dep *post_deps;
+ u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ int ret = 0, i;
+
+ if (!num_out_syncobjs)
+ return 0;
+
+ post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+ if (!post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_out_syncobjs; i++) {
+ u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ kfree(post_deps[i].chain);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_post_deps(post_deps, i);
+ return ret;
+ }
+
+ submit->num_out_syncobjs = num_out_syncobjs;
+ submit->post_deps = post_deps;
+
+ return 0;
+}
+
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+ if (post_deps) {
+ struct dma_fence *fence = &submit->out_fence->f;
+ u32 i;
+
+ for (i = 0; i < submit->num_out_syncobjs; i++) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+ }
+}
+
static int virtio_gpu_fence_event_create(struct drm_device *dev,
struct drm_file *file,
struct virtio_gpu_fence *fence,
u32 ring_idx)
{
- struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence_event *e = NULL;
int ret;
- if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
- return 0;
-
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
return -ENOMEM;
@@ -122,6 +328,10 @@ static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
+ virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
if (!IS_ERR(submit->buf))
kvfree(submit->buf);
@@ -164,18 +374,31 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fence *out_fence;
+ bool drm_fence_event;
int err;
memset(submit, 0, sizeof(*submit));
- out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
- if (!out_fence)
- return -ENOMEM;
-
- err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
- if (err) {
- dma_fence_put(&out_fence->f);
- return err;
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
+ (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
+ drm_fence_event = true;
+ else
+ drm_fence_event = false;
+
+ if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+ exbuf->num_out_syncobjs ||
+ exbuf->num_bo_handles ||
+ drm_fence_event)
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
+ else
+ out_fence = NULL;
+
+ if (drm_fence_event) {
+ err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (err) {
+ dma_fence_put(&out_fence->f);
+ return err;
+ }
}
submit->out_fence = out_fence;
@@ -283,6 +506,14 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if (ret)
goto cleanup;
+ ret = virtio_gpu_parse_post_deps(&submit);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_parse_deps(&submit);
+ if (ret)
+ goto cleanup;
+
/*
* Await in-fences in the end of the job submission path to
* optimize the path by proceeding directly to the submission
@@ -303,6 +534,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
* the job submission path.
*/
virtio_gpu_install_out_fence_fd(&submit);
+ virtio_gpu_process_post_deps(&submit);
virtio_gpu_complete_submit(&submit);
cleanup:
virtio_gpu_cleanup_submit(&submit);
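The in/out syncobj parsing above copies at most sizeof() of the kernel descriptor per entry while stepping through the userspace array by the caller-supplied stride, so the UAPI descriptor can grow without breaking older userspace. A stripped-down sketch of that pattern, with a hypothetical descriptor and helper name:

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_desc {
	__u32 handle;
	__u32 flags;
	__u64 point;
};

static int example_copy_descriptors(u64 user_base, size_t user_stride,
				    struct example_desc *out, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		u64 address = user_base + (u64)i * user_stride;

		/* Fields beyond a short user stride are left zeroed. */
		memset(&out[i], 0, sizeof(out[i]));
		if (copy_from_user(&out[i], u64_to_user_ptr(address),
				   min(user_stride, sizeof(out[i]))))
			return -EFAULT;
	}

	return 0;
}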
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 906d3df40cdb..d5d4f642d367 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include <linux/minmax.h>
@@ -23,7 +24,7 @@ static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
/**
* pre_mul_alpha_blend - alpha blending equation
- * @src_frame_info: source framebuffer's metadata
+ * @frame_info: Source framebuffer's metadata
* @stage_buffer: The line with the pixels from src_plane
* @output_buffer: A line buffer that receives all the blends output
*
@@ -89,12 +90,81 @@ static void fill_background(const struct pixel_argb_u16 *background_color,
output_buffer->pixels[i] = *background_color;
}
+// lerp(a, b, t) = a + (b - a) * t
+static u16 lerp_u16(u16 a, u16 b, s64 t)
+{
+ s64 a_fp = drm_int2fixp(a);
+ s64 b_fp = drm_int2fixp(b);
+
+ s64 delta = drm_fixp_mul(b_fp - a_fp, t);
+
+ return drm_fixp2int(a_fp + delta);
+}
+
+static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
+{
+ s64 color_channel_fp = drm_int2fixp(channel_value);
+
+ return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio);
+}
+
+/*
+ * This enum is related to the positions of the variables inside
+ * `struct drm_color_lut`, so the order of both needs to be the same.
+ */
+enum lut_channel {
+ LUT_RED = 0,
+ LUT_GREEN,
+ LUT_BLUE,
+ LUT_RESERVED
+};
+
+static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
+ enum lut_channel channel)
+{
+ s64 lut_index = get_lut_index(lut, channel_value);
+
+ /*
+ * This checks if `struct drm_color_lut` has any gap added by the compiler
+ * between the struct fields.
+ */
+ static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
+
+ u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+ u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+
+ u16 floor_channel_value = floor_lut_value[channel];
+ u16 ceil_channel_value = ceil_lut_value[channel];
+
+ return lerp_u16(floor_channel_value, ceil_channel_value,
+ lut_index & DRM_FIXED_DECIMAL_MASK);
+}
+
+static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
+{
+ if (!crtc_state->gamma_lut.base)
+ return;
+
+ if (!crtc_state->gamma_lut.lut_length)
+ return;
+
+ for (size_t x = 0; x < output_buffer->n_pixels; x++) {
+ struct pixel_argb_u16 *pixel = &output_buffer->pixels[x];
+
+ pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED);
+ pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN);
+ pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE);
+ }
+}
+
/**
- * @wb_frame_info: The writeback frame buffer metadata
+ * blend - blend the pixels from all planes and compute crc
+ * @wb: The writeback frame buffer metadata
* @crtc_state: The crtc state
* @crc32: The crc output of the final frame
* @output_buffer: A buffer of a row that will receive the result of the blend(s)
* @stage_buffer: The line with the pixels from plane being blend to the output
+ * @row_size: The size, in bytes, of a single row
*
* This function blends the pixels (Using the `pre_mul_alpha_blend`)
* from all planes, calculates the crc32 of the output from the former step,
@@ -128,10 +198,12 @@ static void blend(struct vkms_writeback_job *wb,
output_buffer);
}
+ apply_lut(crtc_state, output_buffer);
+
*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
if (wb)
- wb->wb_write(&wb->wb_frame_info, output_buffer, y_pos);
+ vkms_writeback_row(wb, output_buffer, y_pos);
}
}
@@ -145,7 +217,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
if (!planes[i]->pixel_read)
return -1;
- if (active_wb && !active_wb->wb_write)
+ if (active_wb && !active_wb->pixel_write)
return -1;
return 0;
@@ -242,6 +314,22 @@ void vkms_composer_worker(struct work_struct *work)
crtc_state->frame_start = 0;
crtc_state->frame_end = 0;
crtc_state->crc_pending = false;
+
+ if (crtc->state->gamma_lut) {
+ s64 max_lut_index_fp;
+ s64 u16_max_fp = drm_int2fixp(0xffff);
+
+ crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
+ crtc_state->gamma_lut.lut_length =
+ crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
+ max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
+ crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
+ u16_max_fp);
+
+ } else {
+ crtc_state->gamma_lut.base = NULL;
+ }
+
spin_unlock_irq(&out->composer_lock);
/*
@@ -320,10 +408,15 @@ void vkms_set_composer(struct vkms_output *out, bool enabled)
if (enabled)
drm_crtc_vblank_get(&out->crtc);
- spin_lock_irq(&out->lock);
+ mutex_lock(&out->enabled_lock);
old_enabled = out->composer_enabled;
out->composer_enabled = enabled;
- spin_unlock_irq(&out->lock);
+
+ /* the composer is not enabled, so unlock here to keep the lock
+ * balanced even if the commit fails
+ */
+ if (!out->composer_enabled)
+ mutex_unlock(&out->enabled_lock);
if (old_enabled)
drm_crtc_vblank_put(&out->crtc);
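The apply_lut() and lerp_u16() additions above map each 16-bit channel value to a fractional index into the gamma LUT and linearly interpolate between the two nearest entries, using the 32.32 fixed-point helpers from drm_fixed.h. The following is a minimal, self-contained userspace sketch of the same arithmetic, not driver code: the apply_lut_channel() helper, the 256-entry LUT (matching the VKMS_LUT_SIZE definition later in this diff) and the identity-ramp test are illustrative assumptions, and plain 64-bit integers scaled by 2^16 stand in for the kernel fixed-point API.

#include <stdint.h>
#include <stdio.h>

#define LUT_LEN 256 /* assumed, matches VKMS_LUT_SIZE below */

/*
 * Map a 16-bit channel value onto the LUT and linearly interpolate
 * between the two surrounding entries, mirroring get_lut_index() and
 * lerp_u16() above with the fixed-point math done in 64-bit integers.
 */
static uint16_t apply_lut_channel(const uint16_t lut[LUT_LEN], uint16_t v)
{
	uint64_t idx_fp = (uint64_t)v * (LUT_LEN - 1) * 65536u / 0xffff;
	uint32_t lo = idx_fp >> 16;             /* floor index */
	uint32_t hi = lo + (lo < LUT_LEN - 1);  /* ceil index, clamped */
	int64_t frac = idx_fp & 0xffff;         /* fractional part */

	return lut[lo] + ((int64_t)lut[hi] - lut[lo]) * frac / 65536;
}

int main(void)
{
	uint16_t identity[LUT_LEN];

	for (int i = 0; i < LUT_LEN; i++)
		identity[i] = i * 0xffff / (LUT_LEN - 1);

	/* An identity LUT returns (approximately) the input value. */
	printf("0x8000 -> 0x%04x\n", apply_lut_channel(identity, 0x8000));
	return 0;
}

In the driver, the LUT itself comes from the GAMMA_LUT blob, which vkms_composer_worker() above unpacks into crtc_state->gamma_lut before the next composition pass.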
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 515f6772b866..3c5ebf106b66 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -16,7 +16,7 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
struct drm_crtc *crtc = &output->crtc;
struct vkms_crtc_state *state;
u64 ret_overrun;
- bool ret, fence_cookie;
+ bool ret, fence_cookie, composer_enabled;
fence_cookie = dma_fence_begin_signalling();
@@ -25,15 +25,15 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
if (ret_overrun != 1)
pr_warn("%s: vblank timer overrun\n", __func__);
- spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
- spin_unlock(&output->lock);
+ composer_enabled = output->composer_enabled;
+ mutex_unlock(&output->enabled_lock);
- if (state && output->composer_enabled) {
+ if (state && composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
/* update frame_start only if a queued vkms_composer_worker()
@@ -290,8 +290,12 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
+
spin_lock_init(&vkms_out->lock);
spin_lock_init(&vkms_out->composer_lock);
+ mutex_init(&vkms_out->enabled_lock);
vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
if (!vkms_out->composer_workq)
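With drm_mode_crtc_set_gamma_size() and drm_crtc_enable_color_mgmt() registered above, the CRTC advertises a 256-entry gamma LUT that userspace can program. Below is a hedged sketch of exercising it from userspace through libdrm's legacy gamma call, which the DRM core maps onto the GAMMA_LUT property for atomic drivers; the device path and the inverted ramp are illustrative assumptions and error handling is omitted.

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed vkms node */
	drmModeRes *res = drmModeGetResources(fd);
	uint32_t crtc_id = res->crtcs[0];
	drmModeCrtc *crtc = drmModeGetCrtc(fd, crtc_id);
	int size = crtc->gamma_size;             /* 256 for vkms */
	uint16_t *r = calloc(size, sizeof(*r));
	uint16_t *g = calloc(size, sizeof(*g));
	uint16_t *b = calloc(size, sizeof(*b));

	/* Inverted ramp: bright pixels become dark and vice versa. */
	for (int i = 0; i < size; i++)
		r[i] = g[i] = b[i] = 0xffff - i * 0xffff / (size - 1);

	drmModeCrtcSetGamma(fd, crtc_id, size, r, g, b);
	return 0;
}

Since the CRC in blend() is computed after apply_lut(), such a ramp shows up in the CRC output, which is how a CRC-based test can verify it.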
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index e3c9c9571c8d..dd0af086e7fa 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -120,9 +120,27 @@ static const struct drm_driver vkms_driver = {
.minor = DRIVER_MINOR,
};
+static int vkms_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->gamma_lut || !new_crtc_state->color_mgmt_changed)
+ continue;
+
+ if (new_crtc_state->gamma_lut->length / sizeof(struct drm_color_lut)
+ > VKMS_LUT_SIZE)
+ return -EINVAL;
+ }
+
+ return drm_atomic_helper_check(dev, state);
+}
+
static const struct drm_mode_config_funcs vkms_mode_funcs = {
.fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = vkms_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5f1a0a44a78c..c7ae6c2ba1df 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -23,6 +23,8 @@
#define NUM_OVERLAY_PLANES 8
+#define VKMS_LUT_SIZE 256
+
struct vkms_frame_info {
struct drm_framebuffer *fb;
struct drm_rect src, dst;
@@ -46,8 +48,7 @@ struct line_buffer {
struct vkms_writeback_job {
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct vkms_frame_info wb_frame_info;
- void (*wb_write)(struct vkms_frame_info *frame_info,
- const struct line_buffer *buffer, int y);
+ void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
};
/**
@@ -65,6 +66,12 @@ struct vkms_plane {
struct drm_plane base;
};
+struct vkms_color_lut {
+ struct drm_color_lut *base;
+ size_t lut_length;
+ s64 channel_value2index_ratio;
+};
+
/**
* vkms_crtc_state - Driver specific CRTC state
* @base: base CRTC state
@@ -80,6 +87,7 @@ struct vkms_crtc_state {
/* stack of active planes for crc computation, should be in z order */
struct vkms_plane_state **active_planes;
struct vkms_writeback_job *active_writeback;
+ struct vkms_color_lut gamma_lut;
/* below four are protected by vkms_output.composer_lock */
bool crc_pending;
@@ -100,8 +108,10 @@ struct vkms_output {
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
spinlock_t lock;
+ /* guarantees that if the composer is enabled, a job will be queued */
+ struct mutex enabled_lock;
- /* protected by @lock */
+ /* protected by @enabled_lock */
bool composer_enabled;
struct vkms_crtc_state *composer_state;
@@ -157,6 +167,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
void vkms_composer_worker(struct work_struct *work);
void vkms_set_composer(struct vkms_output *out, bool enabled);
void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
+void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
/* Writeback */
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 5945da0beba6..36046b12f296 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -111,6 +111,19 @@ static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
}
+/**
+ * vkms_compose_row - compose a single row of a plane
+ * @stage_buffer: output line with the composed pixels
+ * @plane: state of the plane that is being composed
+ * @y: y coordinate of the row
+ *
+ * This function composes a single row of a plane. It locates the source
+ * pixels for the given y coordinate (see get_packed_src_addr()) and walks
+ * the row linearly, reading each pixel and converting it to ARGB16161616
+ * (see the pixel_read() callback). For rotate-90 and rotate-270 the source
+ * is not traversed linearly; instead, the source pixel is looked up again on
+ * each iteration so that the pixels are read vertically.
+ */
void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
{
struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
@@ -137,107 +150,81 @@ void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state
* They are used in the `compose_active_planes` to convert and store a line
* from the src_buffer to the writeback buffer.
*/
-static void argb_u16_to_ARGB8888(struct vkms_frame_info *frame_info,
- const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- int x_dst = frame_info->dst.x1;
- u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
- struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- src_buffer->n_pixels);
-
- for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
- /*
- * This sequence below is important because the format's byte order is
- * in little-endian. In the case of the ARGB8888 the memory is
- * organized this way:
- *
- * | Addr | = blue channel
- * | Addr + 1 | = green channel
- * | Addr + 2 | = Red channel
- * | Addr + 3 | = Alpha channel
- */
- dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixels[x].a, 257);
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
- }
+ /*
+ * The sequence below is important because the format's byte order is
+ * little-endian. For ARGB8888 the memory is organized this way:
+ *
+ * | Addr     | = blue channel
+ * | Addr + 1 | = green channel
+ * | Addr + 2 | = red channel
+ * | Addr + 3 | = alpha channel
+ */
+ dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_XRGB8888(struct vkms_frame_info *frame_info,
- const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- int x_dst = frame_info->dst.x1;
- u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
- struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- src_buffer->n_pixels);
-
- for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
- dst_pixels[3] = 0xff;
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
- }
+ dst_pixels[3] = 0xff;
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_ARGB16161616(struct vkms_frame_info *frame_info,
- const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- int x_dst = frame_info->dst.x1;
- u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
- struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- src_buffer->n_pixels);
-
- for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
- dst_pixels[3] = cpu_to_le16(in_pixels[x].a);
- dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
- dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
- dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
- }
+ u16 *pixels = (u16 *)dst_pixels;
+
+ pixels[3] = cpu_to_le16(in_pixel->a);
+ pixels[2] = cpu_to_le16(in_pixel->r);
+ pixels[1] = cpu_to_le16(in_pixel->g);
+ pixels[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_XRGB16161616(struct vkms_frame_info *frame_info,
- const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- int x_dst = frame_info->dst.x1;
- u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
- struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- src_buffer->n_pixels);
-
- for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
- dst_pixels[3] = 0xffff;
- dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
- dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
- dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
- }
+ u16 *pixels = (u16 *)dst_pixels;
+
+ pixels[3] = 0xffff;
+ pixels[2] = cpu_to_le16(in_pixel->r);
+ pixels[1] = cpu_to_le16(in_pixel->g);
+ pixels[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
- const struct line_buffer *src_buffer, int y)
+static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- int x_dst = frame_info->dst.x1;
- u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
- struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- src_buffer->n_pixels);
+ u16 *pixels = (u16 *)dst_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
- for (size_t x = 0; x < x_limit; x++, dst_pixels++) {
- s64 fp_r = drm_int2fixp(in_pixels[x].r);
- s64 fp_g = drm_int2fixp(in_pixels[x].g);
- s64 fp_b = drm_int2fixp(in_pixels[x].b);
+ s64 fp_r = drm_int2fixp(in_pixel->r);
+ s64 fp_g = drm_int2fixp(in_pixel->g);
+ s64 fp_b = drm_int2fixp(in_pixel->b);
- u16 r = drm_fixp2int_round(drm_fixp_div(fp_r, fp_rb_ratio));
- u16 g = drm_fixp2int_round(drm_fixp_div(fp_g, fp_g_ratio));
- u16 b = drm_fixp2int_round(drm_fixp_div(fp_b, fp_rb_ratio));
+ u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+ u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+ u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
- *dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
- }
+ *pixels = cpu_to_le16(r << 11 | g << 5 | b);
+}
+
+void vkms_writeback_row(struct vkms_writeback_job *wb,
+ const struct line_buffer *src_buffer, int y)
+{
+ struct vkms_frame_info *frame_info = &wb->wb_frame_info;
+ int x_dst = frame_info->dst.x1;
+ u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
+ wb->pixel_write(dst_pixels, &in_pixels[x]);
}
void *get_pixel_conversion_function(u32 format)
@@ -258,7 +245,7 @@ void *get_pixel_conversion_function(u32 format)
}
}
-void *get_line_to_frame_function(u32 format)
+void *get_pixel_write_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
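The writeback helpers above now convert one ARGB16161616 pixel at a time; for RGB565 each channel is divided by the fixed-point ratios 65535/31 and 65535/63 and truncated with drm_fixp2int(). Below is a standalone sketch of the same scaling in plain integer math, with a hypothetical helper name and a sample value chosen for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Scale 16-bit channels down to the 5/6/5 bits of RGB565. Dividing by
 * 65535/31 (or 65535/63) and truncating, as drm_fixp2int() does above,
 * matches the integer divisions below up to fixed-point rounding.
 */
static uint16_t argb_u16_to_rgb565(uint16_t r, uint16_t g, uint16_t b)
{
	uint16_t r5 = (uint32_t)r * 31 / 65535;
	uint16_t g6 = (uint32_t)g * 63 / 65535;
	uint16_t b5 = (uint32_t)b * 31 / 65535;

	return (r5 << 11) | (g6 << 5) | b5;
}

int main(void)
{
	/* Full-scale white maps to 0xffff in RGB565. */
	printf("0x%04x\n", argb_u16_to_rgb565(0xffff, 0xffff, 0xffff));
	return 0;
}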
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index c5b113495d0c..cf59c2ed8e9a 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -7,6 +7,6 @@
void *get_pixel_conversion_function(u32 format);
-void *get_line_to_frame_function(u32 format);
+void *get_pixel_write_function(u32 format);
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 84a51cd281b9..d7e63aa14663 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -15,6 +15,7 @@
#include "vkms_formats.h"
static const u32 vkms_wb_formats[] = {
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
@@ -142,13 +143,15 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
spin_lock_irq(&output->composer_lock);
crtc_state->active_writeback = active_wb;
+ crtc_state->wb_pending = true;
+ spin_unlock_irq(&output->composer_lock);
+
wb_frame_info->offset = fb->offsets[0];
wb_frame_info->pitch = fb->pitches[0];
wb_frame_info->cpp = fb->format->cpp[0];
- crtc_state->wb_pending = true;
- spin_unlock_irq(&output->composer_lock);
+
drm_writeback_queue_job(wb_conn, connector_state);
- active_wb->wb_write = get_line_to_frame_function(wb_format);
+ active_wb->pixel_write = get_pixel_write_function(wb_format);
drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
drm_rect_init(&wb_frame_info->dst, 0, 0, crtc_width, crtc_height);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
index 0b74ca2dfb7b..23899d743a90 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
@@ -105,10 +105,14 @@
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%rbp;" \
+ asm volatile ( \
+ UNWIND_HINT_SAVE \
+ "push %%rbp;" \
+ UNWIND_HINT_UNDEFINED \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_OUT \
- "pop %%rbp;" : \
+ "pop %%rbp;" \
+ UNWIND_HINT_RESTORE : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@@ -130,10 +134,14 @@
flags, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%rbp;" \
+ asm volatile ( \
+ UNWIND_HINT_SAVE \
+ "push %%rbp;" \
+ UNWIND_HINT_UNDEFINED \
"mov %12, %%rbp;" \
VMWARE_HYPERCALL_HB_IN \
- "pop %%rbp" : \
+ "pop %%rbp;" \
+ UNWIND_HINT_RESTORE : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 90996c108146..aab79c5e34c2 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -474,10 +473,7 @@ DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);
static const struct drm_driver xen_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.release = xen_drm_drv_release,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
.dumb_create = xen_drm_drv_dumb_create,
.fops = &xen_drm_dev_fops,
.name = "xendrm-du",
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 3b87eebddc97..407bc07cec69 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1094,8 +1094,8 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
"%s%u", dma_names[layer->id], i);
dma->chan = dma_request_chan(disp->dev, dma_channel_name);
if (IS_ERR(dma->chan)) {
- dev_err(disp->dev, "failed to request dma channel\n");
- ret = PTR_ERR(dma->chan);
+ ret = dev_err_probe(disp->dev, PTR_ERR(dma->chan),
+ "failed to request dma channel\n");
dma->chan = NULL;
return ret;
}
@@ -1228,7 +1228,6 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
{
struct platform_device *pdev = to_platform_device(dpsub->dev);
struct zynqmp_disp *disp;
- struct resource *res;
int ret;
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -1238,22 +1237,19 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
disp->dev = &pdev->dev;
disp->dpsub = dpsub;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
- disp->blend.base = devm_ioremap_resource(disp->dev, res);
+ disp->blend.base = devm_platform_ioremap_resource_byname(pdev, "blend");
if (IS_ERR(disp->blend.base)) {
ret = PTR_ERR(disp->blend.base);
goto error;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
- disp->avbuf.base = devm_ioremap_resource(disp->dev, res);
+ disp->avbuf.base = devm_platform_ioremap_resource_byname(pdev, "av_buf");
if (IS_ERR(disp->avbuf.base)) {
ret = PTR_ERR(disp->avbuf.base);
goto error;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
- disp->audio.base = devm_ioremap_resource(disp->dev, res);
+ disp->audio.base = devm_platform_ioremap_resource_byname(pdev, "aud");
if (IS_ERR(disp->audio.base)) {
ret = PTR_ERR(disp->audio.base);
goto error;
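The zynqmp_disp.c conversion above illustrates two common probe-path cleanups: dev_err_probe() prints the error (quietly for -EPROBE_DEFER) and returns it in one call, and devm_platform_ioremap_resource_byname() folds platform_get_resource_byname() plus devm_ioremap_resource() into a single helper; the plain devm_platform_ioremap_resource() conversions in ipu-pre.c and ipu-prg.c later in this diff follow the same pattern. A minimal sketch of the idiom in a hypothetical, unrelated platform driver (the "foo" name and the "ctrl"/"rx" resource names are made up):

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;
	struct dma_chan *chan;

	/* Replaces platform_get_resource_byname() + devm_ioremap_resource(). */
	regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	chan = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		/* Logs the failure (debug level for -EPROBE_DEFER) and returns it. */
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "failed to request dma channel\n");

	dma_release_channel(chan);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");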
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 0a7b466446fb..a0606fab0e22 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -784,7 +784,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
}
/**
- * zynqmp_dp_link_train - Train the link
+ * zynqmp_dp_train - Train the link
* @dp: DisplayPort IP core structure
*
* Return: 0 if all trains are done successfully, or corresponding error code.
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index bab862484d42..88eb33acd5f0 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -227,7 +227,9 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
dpsub->dev = &pdev->dev;
platform_set_drvdata(pdev, dpsub);
- dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
+ ret = dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
+ if (ret)
+ return ret;
/* Try the reserved memory. Proceed if there's none. */
of_reserved_mem_device_init(&pdev->dev);
@@ -280,7 +282,7 @@ err_mem:
return ret;
}
-static int zynqmp_dpsub_remove(struct platform_device *pdev)
+static void zynqmp_dpsub_remove(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
@@ -298,8 +300,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev)
if (!dpsub->drm)
zynqmp_dpsub_release(dpsub);
-
- return 0;
}
static void zynqmp_dpsub_shutdown(struct platform_device *pdev)
@@ -320,7 +320,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
static struct platform_driver zynqmp_dpsub_driver = {
.probe = zynqmp_dpsub_probe,
- .remove = zynqmp_dpsub_remove,
+ .remove_new = zynqmp_dpsub_remove,
.shutdown = zynqmp_dpsub_shutdown,
.driver = {
.name = "zynqmp-dpsub",
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 4d16a3396c4a..84d042796d2e 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -338,32 +338,15 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv)
return strcmp(dev_name(dev), drv->name) == 0;
}
+/*
+ * Note that this is really only needed for backwards compatibility
+ * with libdrm, which parses this information from sysfs and will
+ * fail if it can't find the OF_FULLNAME, specifically.
+ */
static int host1x_device_uevent(const struct device *dev,
struct kobj_uevent_env *env)
{
- struct device_node *np = dev->parent->of_node;
- unsigned int count = 0;
- struct property *p;
- const char *compat;
-
- /*
- * This duplicates most of of_device_uevent(), but the latter cannot
- * be called from modules and operates on dev->of_node, which is not
- * available in this case.
- *
- * Note that this is really only needed for backwards compatibility
- * with libdrm, which parses this information from sysfs and will
- * fail if it can't find the OF_FULLNAME, specifically.
- */
- add_uevent_var(env, "OF_NAME=%pOFn", np);
- add_uevent_var(env, "OF_FULLNAME=%pOF", np);
-
- of_property_for_each_string(np, "compatible", p, compat) {
- add_uevent_var(env, "OF_COMPATIBLE_%u=%s", count, compat);
- count++;
- }
-
- add_uevent_var(env, "OF_COMPATIBLE_N=%u", count);
+ of_device_uevent(dev->parent, env);
return 0;
}
diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
index 9ad89d22c0ca..a3f336edd991 100644
--- a/drivers/gpu/host1x/context.c
+++ b/drivers/gpu/host1x/context.c
@@ -6,7 +6,7 @@
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
#include <linux/pid.h>
#include <linux/slab.h>
@@ -79,6 +79,14 @@ int host1x_memory_context_list_init(struct host1x *host1x)
!device_iommu_mapped(&ctx->dev)) {
dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
device_unregister(&ctx->dev);
+
+ /*
+ * This means that if IOMMU is disabled but context devices
+ * are defined in the device tree, Host1x will fail to probe.
+ * That's probably OK in this day and age.
+ */
+ err = -EINVAL;
+
goto unreg_devices;
}
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index aae2efeef503..7c6699aed7d2 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -11,8 +11,9 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index c35eac1116f5..71ec1e7f657a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -18,7 +18,7 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index ad82c9e0252f..aef984a43190 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -271,15 +271,13 @@ u32 ipu_pre_get_baddr(struct ipu_pre *pre)
static int ipu_pre_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ipu_pre *pre;
pre = devm_kzalloc(dev, sizeof(*pre), GFP_KERNEL);
if (!pre)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pre->regs = devm_ioremap_resource(&pdev->dev, res);
+ pre->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pre->regs))
return PTR_ERR(pre->regs);
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 196797c1b4b3..729605709955 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -358,7 +358,6 @@ EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
static int ipu_prg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ipu_prg *prg;
u32 val;
int i, ret;
@@ -367,12 +366,10 @@ static int ipu_prg_probe(struct platform_device *pdev)
if (!prg)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- prg->regs = devm_ioremap_resource(&pdev->dev, res);
+ prg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(prg->regs))
return PTR_ERR(prg->regs);
-
prg->clk_ipg = devm_clk_get(dev, "ipg");
if (IS_ERR(prg->clk_ipg))
return PTR_ERR(prg->clk_ipg);