Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 88
1 file changed, 58 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3987ecb24ef4..625424f3082b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -913,7 +913,10 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
 {
 	amdgpu_asic_pre_asic_init(adev);
 
-	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
+		return amdgpu_atomfirmware_asic_init(adev, true);
+	else
+		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
 }
 
 /**
@@ -1041,19 +1044,25 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
 
-	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
-					     adev->doorbell_index.max_assignment+1);
-	if (adev->doorbell.num_doorbells == 0)
-		return -EINVAL;
-
-	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
-	 * paging queue doorbell use the second page. The
-	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
-	 * doorbells are in the first page. So with paging queue enabled,
-	 * the max num_doorbells should + 1 page (0x400 in dword)
-	 */
-	if (adev->asic_type >= CHIP_VEGA10)
-		adev->doorbell.num_doorbells += 0x400;
+	if (adev->enable_mes) {
+		adev->doorbell.num_doorbells =
+			adev->doorbell.size / sizeof(u32);
+	} else {
+		adev->doorbell.num_doorbells =
+			min_t(u32, adev->doorbell.size / sizeof(u32),
+			      adev->doorbell_index.max_assignment+1);
+		if (adev->doorbell.num_doorbells == 0)
+			return -EINVAL;
+
+		/* For Vega, reserve and map two pages on doorbell BAR since SDMA
+		 * paging queue doorbell use the second page. The
+		 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+		 * doorbells are in the first page. So with paging queue enabled,
+		 * the max num_doorbells should + 1 page (0x400 in dword)
+		 */
+		if (adev->asic_type >= CHIP_VEGA10)
+			adev->doorbell.num_doorbells += 0x400;
+	}
 
 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
 				     adev->doorbell.num_doorbells *
@@ -1547,9 +1556,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
 							      amdgpu_fw_load_type);
 
-	amdgpu_gmc_tmz_set(adev);
-
-
 	return 0;
 }
@@ -1703,7 +1709,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
  * clockgating is enabled.
  */
 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
-					    u32 *flags)
+					    u64 *flags)
 {
 	int i;
 
@@ -1926,11 +1932,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	adev->firmware.gpu_info_fw = NULL;
 
 	if (adev->mman.discovery_bin) {
-		amdgpu_discovery_get_gfx_info(adev);
-
 		/*
 		 * FIXME: The bounding box is still needed by Navi12, so
-		 * temporarily read it from gpu_info firmware. Should be droped
+		 * temporarily read it from gpu_info firmware. Should be dropped
 		 * when DAL no longer needs it.
 		 */
 		if (adev->asic_type != CHIP_NAVI12)
@@ -3663,8 +3667,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (amdgpu_mcbp)
 		DRM_INFO("MCBP is enabled\n");
 
-	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
-		adev->enable_mes = true;
+	if (adev->asic_type >= CHIP_NAVI10) {
+		if (amdgpu_mes || amdgpu_mes_kiq)
+			adev->enable_mes = true;
+
+		if (amdgpu_mes_kiq)
+			adev->enable_mes_kiq = true;
+	}
 
 	/*
 	 * Reset domain needs to be present early, before XGMI hive discovered
@@ -3689,6 +3698,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
+	/* Enable TMZ based on IP_VERSION */
+	amdgpu_gmc_tmz_set(adev);
+
 	amdgpu_gmc_noretry_set(adev);
 	/* Need to get xgmi info early to decide the reset behavior*/
 	if (adev->gmc.xgmi.supported) {
@@ -3700,7 +3712,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* enable PCIE atomic ops */
 	if (amdgpu_sriov_vf(adev))
 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
-			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 	else
 		adev->have_atomics_support =
@@ -3857,6 +3869,14 @@ fence_driver_init:
 	} else
 		adev->ucode_sysfs_en = true;
 
+	r = amdgpu_psp_sysfs_init(adev);
+	if (r) {
+		adev->psp_sysfs_en = false;
+		if (!amdgpu_sriov_vf(adev))
+			DRM_ERROR("Creating psp sysfs failed\n");
+	} else
+		adev->psp_sysfs_en = true;
+
 	/*
 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
 	 * Otherwise the mgpu fan boost feature will be skipped due to the
@@ -3960,10 +3980,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 {
 	dev_info(adev->dev, "amdgpu: finishing device.\n");
 	flush_delayed_work(&adev->delayed_init_work);
-	if (adev->mman.initialized) {
-		flush_delayed_work(&adev->mman.bdev.wq);
-		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-	}
 	adev->shutdown = true;
 
 	/* make sure IB test finished before entering exclusive mode
@@ -3984,10 +4000,17 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	}
 	amdgpu_fence_driver_hw_fini(adev);
 
+	if (adev->mman.initialized) {
+		flush_delayed_work(&adev->mman.bdev.wq);
+		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	}
+
 	if (adev->pm_sysfs_en)
 		amdgpu_pm_sysfs_fini(adev);
 	if (adev->ucode_sysfs_en)
 		amdgpu_ucode_sysfs_fini(adev);
+	if (adev->psp_sysfs_en)
+		amdgpu_psp_sysfs_fini(adev);
 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
 
 	/* disable ras feature must before hw fini */
@@ -4486,6 +4509,7 @@ retry:
 	if (!r) {
 		amdgpu_irq_gpu_reset_resume_helper(adev);
 		r = amdgpu_ib_ring_tests(adev);
+		amdgpu_amdkfd_post_reset(adev);
 	}
@@ -5195,6 +5219,10 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
 		if (r)
 			adev->asic_reset_res = r;
+
+		/* Aldebaran supports ras in SRIOV, so need resume ras during reset */
+		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+			amdgpu_ras_resume(adev);
 	} else {
 		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
 		if (r && r == -EAGAIN)
@@ -5733,7 +5761,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
 		struct amdgpu_ring *ring)
 {
 #ifdef CONFIG_X86_64
-	if (adev->flags & AMD_IS_APU)
+	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
 		return;
 #endif
 	if (adev->gmc.xgmi.connected_to_cpu)
@@ -5749,7 +5777,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
 		struct amdgpu_ring *ring)
 {
 #ifdef CONFIG_X86_64
-	if (adev->flags & AMD_IS_APU)
+	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
 		return;
 #endif
 	if (adev->gmc.xgmi.connected_to_cpu)
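
The doorbell hunk at @@ -1041 changes the sizing policy: with MES enabled, the driver maps every 32-bit doorbell slot in BAR 2 (MES assigns doorbells dynamically), while the legacy path still caps at the highest static assignment plus, on Vega and later, one extra page for the SDMA paging queue. The following is a minimal standalone sketch of that policy; the function name, parameters, and example numbers are illustrative, not the driver's actual helper:

	#include <stdint.h>
	#include <stdio.h>

	#define DOORBELL_PAGE_DWORDS 0x400 /* one 4 KiB doorbell page, in 32-bit slots */

	/* Returns how many 32-bit doorbell slots to ioremap (0 means invalid). */
	static uint32_t doorbell_count(uint64_t bar_size, uint32_t max_assignment,
				       int enable_mes, int vega_or_later)
	{
		uint32_t num = (uint32_t)(bar_size / sizeof(uint32_t));

		if (enable_mes)
			return num; /* MES hands out doorbells at runtime: map the whole BAR */

		if (num > max_assignment + 1)
			num = max_assignment + 1; /* cap at the highest static assignment */
		if (num == 0)
			return 0; /* the driver returns -EINVAL here */
		if (vega_or_later)
			num += DOORBELL_PAGE_DWORDS; /* SDMA paging queue uses the second page */
		return num;
	}

	int main(void)
	{
		/* e.g. a 2 MiB doorbell BAR, hypothetical max assignment 0x18f, no MES, Vega+ */
		printf("%#x slots\n", doorbell_count(2 << 20, 0x18f, 0, 1));
		return 0;
	}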
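The @@ -1703 hunk widens the flags parameter of amdgpu_device_ip_get_clockgating_state() from u32 to u64, which suggests the AMD_CG_SUPPORT_* clock-gating feature mask outgrew 32 bits, so every caller's local flags word must widen with it. A minimal sketch (plain C, not driver code) of the truncation the wider type avoids:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* A hypothetical clock-gating feature at bit 32: any flag at bit 32
		 * or above cannot survive a round-trip through a u32. */
		uint64_t cg_flags = 1ULL << 32;
		uint32_t truncated = (uint32_t)cg_flags;

		printf("u64: %#llx  u32: %#x\n",
		       (unsigned long long)cg_flags, truncated); /* u32 prints 0: bit lost */
		return 0;
	}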