Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 285
1 file changed, 208 insertions, 77 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 34af664b9f93..9883fa9bb41b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
  * Alex Deucher
  * Jerome Glisse
  */
+#include <linux/power_supply.h>
 #include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
@@ -83,8 +84,10 @@ static const char *amdgpu_asic_name[] = {
 	"POLARIS10",
 	"POLARIS11",
 	"POLARIS12",
+	"VEGAM",
 	"VEGA10",
 	"VEGA12",
+	"VEGA20",
 	"RAVEN",
 	"LAST",
 };
@@ -673,34 +676,34 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
- * Function will place try to place GTT before or after VRAM.
+ * Function will place try to place GART before or after VRAM.
 *
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than space left then we ajust GART size.
 * Thus function will never fails.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
 */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
 				 struct amdgpu_gmc *mc)
 {
 	u64 size_af, size_bf;
 
+	mc->gart_size += adev->pm.smu_prv_buffer_size;
+
 	size_af = adev->gmc.mc_mask - mc->vram_end;
 	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
 		if (mc->gart_size > size_bf) {
-			dev_warn(adev->dev, "limiting GTT\n");
+			dev_warn(adev->dev, "limiting GART\n");
 			mc->gart_size = size_bf;
 		}
 		mc->gart_start = 0;
 	} else {
 		if (mc->gart_size > size_af) {
-			dev_warn(adev->dev, "limiting GTT\n");
+			dev_warn(adev->dev, "limiting GART\n");
 			mc->gart_size = size_af;
 		}
 		/* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -709,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
 		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
 	}
 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
-	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
 			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
@@ -907,6 +910,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 	}
 }
 
+static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+{
+	struct sysinfo si;
+	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+	uint64_t total_memory;
+	uint64_t dram_size_seven_GB = 0x1B8000000;
+	uint64_t dram_size_three_GB = 0xB8000000;
+
+	if (amdgpu_smu_memory_pool_size == 0)
+		return;
+
+	if (!is_os_64) {
+		DRM_WARN("Not 64-bit OS, feature not supported\n");
+		goto def_value;
+	}
+	si_meminfo(&si);
+	total_memory = (uint64_t)si.totalram * si.mem_unit;
+
+	if ((amdgpu_smu_memory_pool_size == 1) ||
+		(amdgpu_smu_memory_pool_size == 2)) {
+		if (total_memory < dram_size_three_GB)
+			goto def_value1;
+	} else if ((amdgpu_smu_memory_pool_size == 4) ||
+		(amdgpu_smu_memory_pool_size == 8)) {
+		if (total_memory < dram_size_seven_GB)
+			goto def_value1;
+	} else {
+		DRM_WARN("Smu memory pool size not supported\n");
+		goto def_value;
+	}
+	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
+
+	return;
+
+def_value1:
+	DRM_WARN("No enough system memory\n");
+def_value:
+	adev->pm.smu_prv_buffer_size = 0;
+}
+
 /**
  * amdgpu_device_check_arguments - validate module params
  *
@@ -948,6 +991,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 		amdgpu_vm_fragment_size = -1;
 	}
 
+	amdgpu_device_check_smu_prv_buffer_size(adev);
+
 	amdgpu_device_check_vm_size(adev);
 
 	amdgpu_device_check_block_size(adev);
@@ -1031,7 +1076,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
 /**
  * amdgpu_device_ip_set_clockgating_state - set the CG state
  *
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
@@ -1039,10 +1084,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1064,7 +1110,7 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
 /**
  * amdgpu_device_ip_set_powergating_state - set the PG state
  *
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
@@ -1072,10 +1118,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1174,7 +1221,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
@@ -1320,9 +1367,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -1339,6 +1387,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 #endif
+	case CHIP_VEGA20:
 	default:
 		return 0;
 	case CHIP_VEGA10:
@@ -1428,9 +1477,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1472,6 +1522,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 #endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		if (adev->asic_type == CHIP_RAVEN)
 			adev->family = AMDGPU_FAMILY_RV;
@@ -1499,6 +1550,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		return -EAGAIN;
 	}
 
+	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -1660,6 +1713,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* enable clockgating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1671,6 +1725,35 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 			}
 		}
 	}
+
+	return 0;
+}
+
+static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+{
+	int i = 0, r;
+
+	if (amdgpu_emu_mode == 1)
+		return 0;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		/* skip CG for VCE/UVD, it's handled specially */
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
+			/* enable powergating to save power */
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+										      AMD_PG_STATE_GATE);
+			if (r) {
+				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				return r;
+			}
+		}
+	}
 	return 0;
 }
 
@@ -1704,8 +1787,11 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		}
 	}
 
-	mod_delayed_work(system_wq, &adev->late_init_work,
-			msecs_to_jiffies(AMDGPU_RESUME_MS));
+	amdgpu_device_ip_late_set_cg_state(adev);
+	amdgpu_device_ip_late_set_pg_state(adev);
+
+	queue_delayed_work(system_wq, &adev->late_init_work,
+			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
 	amdgpu_device_fill_reset_magic(adev);
 
@@ -1742,6 +1828,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
+			if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+				amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 			/* XXX handle errors */
 			if (r) {
@@ -1759,6 +1847,7 @@
 
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1829,7 +1918,11 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, late_init_work.work);
-	amdgpu_device_ip_late_set_cg_state(adev);
+	int r;
+
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
 }
 
 /**
@@ -1857,6 +1950,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 			DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
 	}
 
+	/* call smu to disable gfx off feature first when suspend */
+	if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2080,22 +2177,29 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+		return amdgpu_dc > 0;
+	case CHIP_HAWAII:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
-		return amdgpu_dc != 0;
-#endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case CHIP_RAVEN:
 #endif
@@ -2125,7 +2229,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
  * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
+ * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
@@ -2216,6 +2320,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_DELAYED_WORK(&adev->late_init_work,
 			  amdgpu_device_ip_late_init_func_handler);
 
+	adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
 	if (adev->asic_type >= CHIP_BONAIRE) {
@@ -2375,10 +2481,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_init_data_exchange(adev);
@@ -2500,8 +2602,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 /**
  * amdgpu_device_suspend - initiate device suspend
 *
- * @pdev: drm dev pointer
- * @state: suspend state
+ * @dev: drm dev pointer
+ * @suspend: suspend state
+ * @fbcon : notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
@@ -2539,7 +2642,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
 		if (amdgpu_crtc->cursor_bo) {
@@ -2551,10 +2654,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			}
 		}
 
-		if (rfb == NULL || rfb->obj == NULL) {
+		if (fb == NULL || fb->obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_amdgpu_bo(rfb->obj);
+		robj = gem_to_amdgpu_bo(fb->obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 			r = amdgpu_bo_reserve(robj, true);
@@ -2599,7 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 /**
  * amdgpu_device_resume - initiate device resume
 *
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @resume: resume state
+ * @fbcon : notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
@@ -2640,11 +2745,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	amdgpu_fence_driver_resume(adev);
 
-	if (resume) {
-		r = amdgpu_ib_ring_tests(adev);
-		if (r)
-			DRM_ERROR("ib ring test failed (%d).\n", r);
-	}
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
@@ -2736,6 +2836,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return true;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2792,6 +2895,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
 {
 	int i;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -3061,6 +3167,7 @@ out:
  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
+ * @from_hypervisor: request from hypervisor
 *
 * do VF FLR and reinitialize Asic
 * return 0 means successed otherwise failed
@@ -3087,20 +3194,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
-	amdgpu_virt_release_full_gpu(adev, true);
 	if (r)
 		goto error;
 
 	amdgpu_irq_gpu_reset_resume_helper(adev);
 	r = amdgpu_ib_ring_tests(adev);
+error:
+	amdgpu_virt_release_full_gpu(adev, true);
+
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
 		r = amdgpu_device_handle_vram_lost(adev);
 	}
-error:
-
 	return r;
 }
 
@@ -3109,7 +3215,7 @@ error:
 *
 * @adev: amdgpu device pointer
 * @job: which job trigger hang
- * @force forces reset regardless of amdgpu_gpu_recovery
+ * @force: forces reset regardless of amdgpu_gpu_recovery
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
@@ -3117,7 +3223,6 @@ error:
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job *job, bool force)
 {
-	struct drm_atomic_state *state = NULL;
 	int i, r, resched;
 
 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -3140,10 +3245,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* store modesetting */
-	if (amdgpu_device_has_dc_support(adev))
-		state = drm_atomic_helper_suspend(adev->ddev);
-
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3183,10 +3284,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		kthread_unpark(ring->sched.thread);
 	}
 
-	if (amdgpu_device_has_dc_support(adev)) {
-		if (drm_atomic_helper_resume(adev->ddev, state))
-			dev_info(adev->dev, "drm resume failed:%d\n", r);
-	} else {
+	if (!amdgpu_device_has_dc_support(adev)) {
 		drm_helper_resume_force_mode(adev->ddev);
 	}
 
@@ -3217,8 +3315,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 */
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 {
-	u32 mask;
-	int ret;
+	struct pci_dev *pdev;
+	enum pci_bus_speed speed_cap;
+	enum pcie_link_width link_width;
 
 	if (amdgpu_pcie_gen_cap)
 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3236,27 +3335,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 	}
 
 	if (adev->pm.pcie_gen_mask == 0) {
-		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-		if (!ret) {
-			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+		/* asic caps */
+		pdev = adev->pdev;
+		speed_cap = pcie_get_speed_cap(pdev);
+		if (speed_cap == PCI_SPEED_UNKNOWN) {
+			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
-			if (mask & DRM_PCIE_SPEED_25)
-				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
-			if (mask & DRM_PCIE_SPEED_50)
-				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
-			if (mask & DRM_PCIE_SPEED_80)
-				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
 		} else {
-			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+			if (speed_cap == PCIE_SPEED_16_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+			else if (speed_cap == PCIE_SPEED_8_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+			else if (speed_cap == PCIE_SPEED_5_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+			else
+				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+		}
+		/* platform caps */
+		pdev = adev->ddev->pdev->bus->self;
+		speed_cap = pcie_get_speed_cap(pdev);
+		if (speed_cap == PCI_SPEED_UNKNOWN) {
+			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+		} else {
+			if (speed_cap == PCIE_SPEED_16_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+			else if (speed_cap == PCIE_SPEED_8_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+			else if (speed_cap == PCIE_SPEED_5_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+			else
+				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+		}
 	}
 
 	if (adev->pm.pcie_mlw_mask == 0) {
-		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
-		if (!ret) {
-			switch (mask) {
-			case 32:
+		pdev = adev->ddev->pdev->bus->self;
+		link_width = pcie_get_width_cap(pdev);
+		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
+		} else {
+			switch (link_width) {
+			case PCIE_LNK_X32:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
@@ -3265,7 +3398,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 16:
+			case PCIE_LNK_X16:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
@@ -3273,36 +3406,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 12:
+			case PCIE_LNK_X12:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 8:
+			case PCIE_LNK_X8:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 4:
+			case PCIE_LNK_X4:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 2:
+			case PCIE_LNK_X2:
 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
 				break;
-			case 1:
+			case PCIE_LNK_X1:
 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
 				break;
 			default:
 				break;
 			}
-		} else {
-			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		}
 	}
 }
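
Note on the new amdgpu_device_check_smu_prv_buffer_size() hunk above: the amdgpu_smu_memory_pool_size module parameter is interpreted in units of 256 MiB (the driver computes bytes as value << 28), only 1/2/4/8 are accepted, and small pools require roughly 3 GiB of system RAM while large pools require roughly 7 GiB. The following is a minimal user-space sketch of that mapping only; the function name and standalone form are illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Re-statement of the pool-size rule from the patch: values 1 and 2 need
 * at least ~3 GiB of RAM, 4 and 8 need ~7 GiB, anything else is rejected,
 * and the reserved size in bytes is (pool_size << 28). */
static uint64_t smu_pool_bytes(unsigned int pool_size, uint64_t total_ram)
{
	const uint64_t three_gb = 0xB8000000ULL;   /* thresholds used by the patch */
	const uint64_t seven_gb = 0x1B8000000ULL;

	if (pool_size == 0)
		return 0;                          /* feature disabled */
	if (pool_size == 1 || pool_size == 2)
		return (total_ram < three_gb) ? 0 : ((uint64_t)pool_size << 28);
	if (pool_size == 4 || pool_size == 8)
		return (total_ram < seven_gb) ? 0 : ((uint64_t)pool_size << 28);
	return 0;                                  /* unsupported value */
}

int main(void)
{
	/* 8 * 256 MiB = 2048 MiB reserved on a 16 GiB machine */
	printf("%llu MiB\n",
	       (unsigned long long)(smu_pool_bytes(8, 16ULL << 30) >> 20));
	return 0;
}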
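The reserved SMU pool is then added to the GART size in amdgpu_device_gart_location(), which places the GART in whichever gap (below or above VRAM) is larger, clamps it to fit, and aligns an above-VRAM GART start to a 4 GiB boundary because VCE dislikes buffers crossing a 4 GiB segment. The sketch below models only that placement decision; the struct and field names are ad hoc stand-ins, not the driver's types.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

struct mc_layout {
	uint64_t mc_mask;                 /* highest addressable MC byte */
	uint64_t vram_start, vram_end;
	uint64_t gart_start, gart_end, gart_size;
};

/* Simplified model of the placement logic in amdgpu_device_gart_location(). */
static void place_gart(struct mc_layout *mc)
{
	uint64_t size_af = mc->mc_mask - mc->vram_end;  /* space after VRAM */
	uint64_t size_bf = mc->vram_start;              /* space before VRAM */

	if (size_bf > size_af) {
		if (mc->gart_size > size_bf)
			mc->gart_size = size_bf;        /* "limiting GART" */
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af)
			mc->gart_size = size_af;
		mc->gart_start = ALIGN_UP(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
}

int main(void)
{
	struct mc_layout mc = {
		.mc_mask = 0xFFFFFFFFFFULL,             /* 40-bit MC address space */
		.vram_start = 0,
		.vram_end = (8ULL << 30) - 1,           /* 8 GiB of VRAM at offset 0 */
		.gart_size = 256ULL << 20,
	};
	place_gart(&mc);
	printf("GART: %lluM 0x%016llX - 0x%016llX\n",
	       (unsigned long long)(mc.gart_size >> 20),
	       (unsigned long long)mc.gart_start,
	       (unsigned long long)mc.gart_end);
	return 0;
}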
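Finally, amdgpu_device_get_pcie_info() now derives its capability masks from the PCI core helpers pcie_get_speed_cap() and pcie_get_width_cap() instead of the removed drm_pcie_get_speed_cap_mask(); the masks are cumulative, so a Gen3-capable link also advertises Gen1 and Gen2. The sketch below mirrors only that cumulative-mask idea; the enum and the bit defines are made-up stand-ins for the CAIL_* macros and pci_bus_speed values, not the real kernel definitions.

#include <stdint.h>
#include <stdio.h>

enum bus_speed { SPEED_UNKNOWN, SPEED_2_5GT, SPEED_5_0GT, SPEED_8_0GT, SPEED_16_0GT };

#define LINK_SPEED_GEN1 (1u << 0)
#define LINK_SPEED_GEN2 (1u << 1)
#define LINK_SPEED_GEN3 (1u << 2)
#define LINK_SPEED_GEN4 (1u << 3)

/* Each supported speed implies all lower generations as well. */
static uint32_t gen_mask_from_speed_cap(enum bus_speed cap)
{
	switch (cap) {
	case SPEED_16_0GT:
		return LINK_SPEED_GEN1 | LINK_SPEED_GEN2 | LINK_SPEED_GEN3 | LINK_SPEED_GEN4;
	case SPEED_8_0GT:
		return LINK_SPEED_GEN1 | LINK_SPEED_GEN2 | LINK_SPEED_GEN3;
	case SPEED_5_0GT:
		return LINK_SPEED_GEN1 | LINK_SPEED_GEN2;
	case SPEED_2_5GT:
		return LINK_SPEED_GEN1;
	default:
		/* unknown: the patch falls back to assuming Gen1..Gen3 for the ASIC */
		return LINK_SPEED_GEN1 | LINK_SPEED_GEN2 | LINK_SPEED_GEN3;
	}
}

int main(void)
{
	printf("gen mask for an 8.0 GT/s cap: 0x%x\n",
	       gen_mask_from_speed_cap(SPEED_8_0GT));
	return 0;
}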