Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	586
1 file changed, 417 insertions(+), 169 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 57ec108b5972..694c3726e0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,8 @@
 #include <drm/task_barrier.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_drv.h>
+
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -82,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS		2000
 
@@ -113,12 +116,16 @@ const char *amdgpu_asic_name[] = {
 	"RENOIR",
 	"ALDEBARAN",
 	"NAVI10",
+	"CYAN_SKILLFISH",
 	"NAVI14",
 	"NAVI12",
 	"SIENNA_CICHLID",
 	"NAVY_FLOUNDER",
 	"VANGOGH",
 	"DIMGREY_CAVEFISH",
+	"BEIGE_GOBY",
+	"YELLOW_CARP",
+	"IP DISCOVERY",
 	"LAST",
 };
 
@@ -262,12 +269,27 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
 	return amdgpu_asic_supports_baco(adev);
 }
 
+/**
+ * amdgpu_device_supports_smart_shift - Is the device a dGPU with
+ * Smart Shift support
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with Smart Shift support,
+ * otherwise returns false.
+ */
+bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+{
+	return (amdgpu_device_supports_boco(dev) &&
+		amdgpu_acpi_is_power_shift_control_supported());
+}
+
 /*
  * VRAM access helper functions
  */
 
 /**
- * amdgpu_device_vram_access - read/write a buffer in vram
+ * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
  *
  * @adev: amdgpu_device pointer
  * @pos: offset of the buffer in vram
@@ -275,54 +297,107 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
  * @size: read/write size, sizeof(@buf) must be > @size
  * @write: true - write to vram, otherwise - read from vram
  */
-void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
-			       uint32_t *buf, size_t size, bool write)
+void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
+			     void *buf, size_t size, bool write)
 {
 	unsigned long flags;
-	uint32_t hi = ~0;
+	uint32_t hi = ~0, tmp = 0;
+	uint32_t *data = buf;
 	uint64_t last;
+	int idx;
+
+	if (!drm_dev_enter(adev_to_drm(adev), &idx))
+		return;
+
+	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
+
+	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+	for (last = pos + size; pos < last; pos += 4) {
+		tmp = pos >> 31;
+
+		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+		if (tmp != hi) {
+			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+			hi = tmp;
+		}
+		if (write)
+			WREG32_NO_KIQ(mmMM_DATA, *data++);
+		else
+			*data++ = RREG32_NO_KIQ(mmMM_DATA);
+	}
+	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+	drm_dev_exit(idx);
+}
+
+/**
+ * amdgpu_device_aper_access - access vram by the vram aperture
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must be > @size
+ * @write: true - write to vram, otherwise - read from vram
+ *
+ * The return value means how many bytes have been transferred.
+ */
+size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
+				 void *buf, size_t size, bool write)
+{
 #ifdef CONFIG_64BIT
+	void __iomem *addr;
+	size_t count = 0;
+	uint64_t last;
+
+	if (!adev->mman.aper_base_kaddr)
+		return 0;
+
 	last = min(pos + size, adev->gmc.visible_vram_size);
 	if (last > pos) {
-		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
-		size_t count = last - pos;
+		addr = adev->mman.aper_base_kaddr + pos;
+		count = last - pos;
 
 		if (write) {
 			memcpy_toio(addr, buf, count);
 			mb();
-			amdgpu_asic_flush_hdp(adev, NULL);
+			amdgpu_device_flush_hdp(adev, NULL);
 		} else {
-			amdgpu_asic_invalidate_hdp(adev, NULL);
+			amdgpu_device_invalidate_hdp(adev, NULL);
 			mb();
 			memcpy_fromio(buf, addr, count);
 		}
 
-		if (count == size)
-			return;
-
-		pos += count;
-		buf += count / 4;
-		size -= count;
 	}
+
+	return count;
+#else
+	return 0;
 #endif
+}
 
-	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-	for (last = pos + size; pos < last; pos += 4) {
-		uint32_t tmp = pos >> 31;
+/**
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must be > @size
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+			       void *buf, size_t size, bool write)
+{
+	size_t count;
 
-		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
-		if (tmp != hi) {
-			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
-			hi = tmp;
-		}
-		if (write)
-			WREG32_NO_KIQ(mmMM_DATA, *buf++);
-		else
-			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
+	/* try to use the vram aperture to access vram first */
+	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
+	size -= count;
+	if (size) {
+		/* use MM to access the rest of vram */
+		pos += count;
+		buf += count;
+		amdgpu_device_mm_access(adev, pos, buf, size, write);
 	}
-	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 }
 
 /*
@@ -332,7 +407,7 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 /* Check if hw access should be skipped because of hotplug or device error */
 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
 {
-	if (adev->in_pci_err_recovery)
+	if (adev->no_hw_access)
 		return true;
 
 #ifdef CONFIG_LOCKDEP
@@ -490,7 +565,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
 	    adev->gfx.rlc.funcs &&
 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
+			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
 	} else {
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	}
@@ -1238,15 +1313,16 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 /**
  * amdgpu_device_vga_set_decode - enable/disable vga decode
  *
- * @cookie: amdgpu_device pointer
+ * @pdev: PCI device pointer
  * @state: enable/disable vga decode
  *
  * Enable/disable vga decode (all asics).
  * Returns VGA resource flags.
  */
-static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
+static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
+		bool state)
 {
-	struct amdgpu_device *adev = cookie;
+	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
 	amdgpu_asic_set_vga_state(adev, state);
 	if (state)
 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -1341,6 +1417,42 @@ def_value:
 	adev->pm.smu_prv_buffer_size = 0;
 }
 
+static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
+{
+	if (!(adev->flags & AMD_IS_APU) ||
+	    adev->asic_type < CHIP_RAVEN)
+		return 0;
+
+	switch (adev->asic_type) {
+	case CHIP_RAVEN:
+		if (adev->pdev->device == 0x15dd)
+			adev->apu_flags |= AMD_APU_IS_RAVEN;
+		if (adev->pdev->device == 0x15d8)
+			adev->apu_flags |= AMD_APU_IS_PICASSO;
+		break;
+	case CHIP_RENOIR:
+		if ((adev->pdev->device == 0x1636) ||
+		    (adev->pdev->device == 0x164c))
+			adev->apu_flags |= AMD_APU_IS_RENOIR;
+		else
+			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+		break;
+	case CHIP_VANGOGH:
+		adev->apu_flags |= AMD_APU_IS_VANGOGH;
+		break;
+	case CHIP_YELLOW_CARP:
+		break;
+	case CHIP_CYAN_SKILLFISH:
+		if (adev->pdev->device == 0x13FE)
+			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_device_check_arguments - validate module params
  *
@@ -1820,6 +1932,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 	default:
 		return 0;
 	case CHIP_VEGA10:
@@ -1857,6 +1970,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 		chip_name = "vangogh";
 		break;
+	case CHIP_YELLOW_CARP:
+		chip_name = "yellow_carp";
+		break;
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -2011,41 +2127,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		if (r)
 			return r;
 		break;
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
-	case CHIP_ALDEBARAN:
-		if (adev->flags & AMD_IS_APU)
-			adev->family = AMDGPU_FAMILY_RV;
-		else
-			adev->family = AMDGPU_FAMILY_AI;
-
-		r = soc15_set_ip_blocks(adev);
-		if (r)
-			return r;
-		break;
-	case CHIP_NAVI10:
-	case CHIP_NAVI14:
-	case CHIP_NAVI12:
-	case CHIP_SIENNA_CICHLID:
-	case CHIP_NAVY_FLOUNDER:
-	case CHIP_DIMGREY_CAVEFISH:
-	case CHIP_VANGOGH:
-		if (adev->asic_type == CHIP_VANGOGH)
-			adev->family = AMDGPU_FAMILY_VGH;
-		else
-			adev->family = AMDGPU_FAMILY_NV;
-
-		r = nv_set_ip_blocks(adev);
+	default:
+		r = amdgpu_discovery_set_ip_blocks(adev);
 		if (r)
 			return r;
 		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
 	}
 
 	amdgpu_amdkfd_device_probe(adev);
@@ -2571,34 +2657,26 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-/**
- * amdgpu_device_ip_fini - run fini for hardware IPs
- *
- * @adev: amdgpu_device pointer
- *
- * Main teardown pass for hardware IPs. The list of all the hardware
- * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
- * are run. hw_fini tears down the hardware associated with each IP
- * and sw_fini tears down any software state associated with each IP.
- * Returns 0 on success, negative error code on failure.
- */
-static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 {
 	int i, r;
 
-	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
-		amdgpu_virt_release_ras_err_handler_data(adev);
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].version->funcs->early_fini)
+			continue;
 
-	amdgpu_ras_pre_fini(adev);
+		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+		if (r) {
+			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
+		}
+	}
 
-	if (adev->gmc.xgmi.num_physical_nodes > 1)
-		amdgpu_xgmi_remove_device(adev);
+	amdgpu_amdkfd_suspend(adev, false);
 
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
-	amdgpu_amdkfd_device_fini(adev);
-
 	/* need to disable SMC first */
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.hw)
@@ -2629,6 +2707,38 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.hw = false;
 	}
 
+	if (amdgpu_sriov_vf(adev)) {
+		if (amdgpu_virt_release_full_gpu(adev, false))
+			DRM_ERROR("failed to release exclusive mode on fini\n");
+	}
+
+	return 0;
+}
+
+/**
+ * amdgpu_device_ip_fini - run fini for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main teardown pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
+ * are run. hw_fini tears down the hardware associated with each IP
+ * and sw_fini tears down any software state associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
+		amdgpu_virt_release_ras_err_handler_data(adev);
+
+	amdgpu_ras_pre_fini(adev);
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1)
+		amdgpu_xgmi_remove_device(adev);
+
+	amdgpu_amdkfd_device_fini_sw(adev);
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
@@ -2662,10 +2772,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 	amdgpu_ras_fini(adev);
 
-	if (amdgpu_sriov_vf(adev))
-		if (amdgpu_virt_release_full_gpu(adev, false))
-			DRM_ERROR("failed to release exclusive mode on fini\n");
-
 	return 0;
 }
 
@@ -2690,12 +2796,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-	mutex_lock(&adev->gfx.gfx_off_mutex);
-	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-			adev->gfx.gfx_off_state = true;
-	}
-	mutex_unlock(&adev->gfx.gfx_off_mutex);
+	WARN_ON_ONCE(adev->gfx.gfx_off_state);
+	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+		adev->gfx.gfx_off_state = true;
 }
 
 /**
@@ -2856,7 +2961,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 		AMD_IP_BLOCK_TYPE_IH,
 	};
 
-	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+	for (i = 0; i < adev->num_ip_blocks; i++) {
 		int j;
 		struct amdgpu_ip_block *block;
 
@@ -3010,6 +3115,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 {
 	int r;
 
+	r = amdgpu_amdkfd_resume_iommu(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_device_ip_resume_phase1(adev);
 	if (r)
 		return r;
@@ -3034,7 +3143,7 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		if (adev->is_atom_fw) {
-			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 		} else {
 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
@@ -3057,12 +3166,28 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
+	case CHIP_TOPAZ:
+		/* chips with no display hardware */
+		return false;
 #if defined(CONFIG_DRM_AMD_DC)
-#if defined(CONFIG_DRM_AMD_DC_SI)
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+#if defined(CONFIG_DRM_AMD_DC_SI)
+		return amdgpu_dc > 0;
+#else
+		return false;
 #endif
 	case CHIP_BONAIRE:
 	case CHIP_KAVERI:
@@ -3094,18 +3219,23 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 	case CHIP_RENOIR:
+	case CHIP_CYAN_SKILLFISH:
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 #endif
+	default:
 		return amdgpu_dc != 0;
-#endif
+#else
 	default:
 		if (amdgpu_dc > 0)
 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
 				      "but isn't supported by ASIC, ignoring\n");
 		return false;
+#endif
 	}
 }
 
@@ -3181,8 +3311,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	int ret = 0;
 
 	/*
-	 * By default timeout for non compute jobs is 10000.
-	 * And there is no timeout enforced on compute jobs.
+	 * By default timeout for non compute jobs is 10000
+	 * and 60000 for compute jobs.
 	 * In SR-IOV or passthrough mode, timeout for compute
 	 * jobs are 60000 by default.
 	 */
@@ -3191,10 +3321,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
 				msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
-	else if (amdgpu_passthrough(adev))
-		adev->compute_timeout =  msecs_to_jiffies(60000);
 	else
-		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+		adev->compute_timeout =  msecs_to_jiffies(60000);
 
 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
 		while ((timeout_setting = strsep(&input, ",")) &&
@@ -3208,6 +3336,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 				continue;
 			} else if (timeout < 0) {
 				timeout = MAX_SCHEDULE_TIMEOUT;
+				dev_warn(adev->dev, "lockup timeout disabled");
+				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
 			} else {
 				timeout = msecs_to_jiffies(timeout);
 			}
@@ -3251,7 +3381,6 @@ static const struct attribute *amdgpu_dev_attributes[] = {
 	NULL
 };
 
-
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -3332,6 +3461,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->psp.mutex);
 	mutex_init(&adev->notifier_lock);
 
+	r = amdgpu_device_init_apu_flags(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_device_check_arguments(adev);
 	if (r)
 		return r;
@@ -3382,6 +3515,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
 	}
 
+	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
+		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
+
 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
 	if (adev->rmmio == NULL) {
 		return -ENOMEM;
@@ -3389,17 +3525,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-	/* enable PCIE atomic ops */
-	r = pci_enable_atomic_ops_to_root(adev->pdev,
-					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
-					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
-	if (r) {
-		adev->have_atomics_support = false;
-		DRM_INFO("PCIE atomic ops is not supported\n");
-	} else {
-		adev->have_atomics_support = true;
-	}
-
 	amdgpu_device_get_pcie_info(adev);
 
 	if (amdgpu_mcbp)
@@ -3414,13 +3539,26 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_device_get_job_timeout_settings(adev);
 	if (r) {
 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		goto failed_unmap;
+		return r;
 	}
 
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
-		goto failed_unmap;
+		return r;
+
+	/* enable PCIE atomic ops */
+	if (amdgpu_sriov_vf(adev))
+		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	else
+		adev->have_atomics_support =
+			!pci_enable_atomic_ops_to_root(adev->pdev,
+					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	if (!adev->have_atomics_support)
+		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
 
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);
@@ -3505,9 +3643,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 fence_driver_init:
 	/* Fence driver */
-	r = amdgpu_fence_driver_init(adev);
+	r = amdgpu_fence_driver_sw_init(adev);
 	if (r) {
-		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
 		goto failed;
 	}
@@ -3534,6 +3672,8 @@ fence_driver_init:
 		goto release_ras_con;
 	}
 
+	amdgpu_fence_driver_hw_init(adev);
+
 	dev_info(adev->dev,
 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
 			adev->gfx.config.max_shader_engines,
@@ -3625,7 +3765,7 @@ fence_driver_init:
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
-		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
+		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
 
 	if (amdgpu_device_supports_px(ddev)) {
 		px = true;
@@ -3646,11 +3786,28 @@ release_ras_con:
 failed:
 	amdgpu_vf_error_trans_all(adev);
 
-failed_unmap:
+	return r;
+}
+
+static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
+{
+	/* Clear all CPU mappings pointing to this device */
+	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
+
+	/* Unmap all mapped bars - Doorbell, registers and VRAM */
+	amdgpu_device_doorbell_fini(adev);
+
 	iounmap(adev->rmmio);
 	adev->rmmio = NULL;
+	if (adev->mman.aper_base_kaddr)
+		iounmap(adev->mman.aper_base_kaddr);
+	adev->mman.aper_base_kaddr = NULL;
 
-	return r;
+	/* Memory manager related */
+	if (!adev->gmc.xgmi.connected_to_cpu) {
+		arch_phys_wc_del(adev->gmc.vram_mtrr);
+		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+	}
 }
 
 /**
@@ -3661,15 +3818,16 @@ failed_unmap:
  * Tear down the driver info (all asics).
  * Called at driver shutdown.
 */
-void amdgpu_device_fini(struct amdgpu_device *adev)
+void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 {
 	dev_info(adev->dev, "amdgpu: finishing device.\n");
 	flush_delayed_work(&adev->delayed_init_work);
-	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	if (adev->mman.initialized) {
+		flush_delayed_work(&adev->mman.bdev.wq);
+		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+	}
 	adev->shutdown = true;
 
-	kfree(adev->pci_state);
-
 	/* make sure IB test finished before entering exclusive mode
 	 * to avoid preemption on IB test
 	 * */
@@ -3681,15 +3839,35 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
-		if (!amdgpu_device_has_dc_support(adev))
+		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
 			drm_helper_force_disable_all(adev_to_drm(adev));
 		else
 			drm_atomic_helper_shutdown(adev_to_drm(adev));
 	}
-	amdgpu_fence_driver_fini(adev);
+	amdgpu_fence_driver_hw_fini(adev);
+
 	if (adev->pm_sysfs_en)
 		amdgpu_pm_sysfs_fini(adev);
+	if (adev->ucode_sysfs_en)
+		amdgpu_ucode_sysfs_fini(adev);
+	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+
 	amdgpu_fbdev_fini(adev);
+
+	amdgpu_device_ip_fini_early(adev);
+
+	amdgpu_irq_fini_hw(adev);
+
+	ttm_device_clear_dma_mappings(&adev->mman.bdev);
+
+	amdgpu_gart_dummy_page_fini(adev);
+
+	amdgpu_device_unmap_mmio(adev);
+}
+
+void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+{
+	amdgpu_fence_driver_sw_fini(adev);
 	amdgpu_device_ip_fini(adev);
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
@@ -3711,21 +3889,36 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	}
 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
-		vga_client_register(adev->pdev, NULL, NULL, NULL);
-	iounmap(adev->rmmio);
-	adev->rmmio = NULL;
-	amdgpu_device_doorbell_fini(adev);
+		vga_client_unregister(adev->pdev);
 
-	if (adev->ucode_sysfs_en)
-		amdgpu_ucode_sysfs_fini(adev);
-
-	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
 	if (adev->mman.discovery_bin)
 		amdgpu_discovery_fini(adev);
+
+	kfree(adev->pci_state);
+
 }
 
+/**
+ * amdgpu_device_evict_resources - evict device resources
+ * @adev: amdgpu device object
+ *
+ * Evicts all ttm device resources (vram BOs, gart table) from the lru list
+ * of the vram memory type. Mainly used for evicting device resources
+ * at suspend time.
+ *
+ */
+static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
+{
+	/* No need to evict vram on APUs for suspend to ram */
+	if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+		return;
+
+	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
+		DRM_WARN("evicting device resources failed\n");
+
+}
 
 /*
  * Suspend & resume.
@@ -3743,12 +3936,15 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
-	int r;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	adev->in_suspend = true;
+
+	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
+		DRM_WARN("smart shift update failed\n");
+
 	drm_kms_helper_poll_disable(dev);
 
 	if (fbcon)
@@ -3758,22 +3954,21 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
 	amdgpu_ras_suspend(adev);
 
-	r = amdgpu_device_ip_suspend_phase1(adev);
+	amdgpu_device_ip_suspend_phase1(adev);
 
 	if (!adev->in_s0ix)
 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
-	/* evict vram memory */
-	amdgpu_bo_evict_vram(adev);
+	/* First evict vram memory */
+	amdgpu_device_evict_resources(adev);
 
-	amdgpu_fence_driver_suspend(adev);
+	amdgpu_fence_driver_hw_fini(adev);
 
-	r = amdgpu_device_ip_suspend_phase2(adev);
-	/* evict remaining vram memory
-	 * This second call to evict vram is to evict the gart page table
-	 * using the CPU.
+	amdgpu_device_ip_suspend_phase2(adev);
+	/* This second call to evict device resources is to evict
+	 * the gart page table using the CPU.
 	 */
-	amdgpu_bo_evict_vram(adev);
+	amdgpu_device_evict_resources(adev);
 
 	return 0;
 }
@@ -3811,8 +4006,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
 		return r;
 	}
-	amdgpu_fence_driver_resume(adev);
-
+	amdgpu_fence_driver_hw_init(adev);
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
@@ -3858,6 +4052,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 #endif
 	adev->in_suspend = false;
 
+	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+		DRM_WARN("smart shift update failed\n");
+
 	return 0;
 }
 
@@ -4031,6 +4228,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 {
 	struct dma_fence *fence = NULL, *next = NULL;
 	struct amdgpu_bo *shadow;
+	struct amdgpu_bo_vm *vmbo;
 	long r = 1, tmo;
 
 	if (amdgpu_sriov_runtime(adev))
@@ -4040,12 +4238,12 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	dev_info(adev->dev, "recover vram bo from shadow start\n");
 	mutex_lock(&adev->shadow_list_lock);
-	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
-
+	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
+		shadow = &vmbo->bo;
 		/* No need to recover an evicted BO */
-		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
-		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
-		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
 			continue;
 
 		r = amdgpu_bo_restore_shadow(shadow, &next);
@@ -4097,6 +4295,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 {
 	int r;
 
+	amdgpu_amdkfd_pre_reset(adev);
+
 	if (from_hypervisor)
 		r = amdgpu_virt_request_full_gpu(adev, true);
 	else
@@ -4104,8 +4304,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	amdgpu_amdkfd_pre_reset(adev);
-
 	/* Resume IP prior to SMC */
 	r = amdgpu_device_ip_reinit_early_sriov(adev);
 	if (r)
@@ -4210,6 +4408,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_SIENNA_CICHLID:
 		case CHIP_NAVY_FLOUNDER:
 		case CHIP_DIMGREY_CAVEFISH:
+		case CHIP_BEIGE_GOBY:
 		case CHIP_VANGOGH:
 		case CHIP_ALDEBARAN:
 			break;
@@ -4276,10 +4475,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	if (reset_context->reset_req_dev == adev)
 		job = reset_context->job;
 
-	/* no need to dump if device is not in good state during probe period */
-	if (!adev->gmc.xgmi.pending_reset)
-		amdgpu_debugfs_wait_dump(adev);
-
 	if (amdgpu_sriov_vf(adev)) {
 		/* stop the data exchange thread */
 		amdgpu_virt_fini_data_exchange(adev);
@@ -4292,11 +4487,15 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 		if (!ring || !ring->sched.thread)
 			continue;
 
+		/* clear job fences from fence drv to avoid force_completion
+		 * leaving NULL and vm flush fences in fence drv */
+		amdgpu_fence_driver_clear_job_fences(ring);
+
 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
 		amdgpu_fence_driver_force_completion(ring);
 	}
 
-	if(job)
+	if (job && job->vm)
 		drm_sched_increase_karma(&job->base);
 
 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
@@ -4408,6 +4607,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 				dev_warn(tmp_adev->dev, "asic atom init failed!");
 			} else {
 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+				if (r)
+					goto out;
+
 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
 				if (r)
 					goto out;
@@ -4634,7 +4837,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
 	return 0;
 }
 
-void amdgpu_device_recheck_guilty_jobs(
+static void amdgpu_device_recheck_guilty_jobs(
 	struct amdgpu_device *adev, struct list_head *device_list_handle,
 	struct amdgpu_reset_context *reset_context)
 {
@@ -4655,6 +4858,9 @@ void amdgpu_device_recheck_guilty_jobs(
 		/* clear job's guilty and depend on the following step to decide the real one */
 		drm_sched_reset_karma(s_job);
+		/* for the real bad job, it will be resubmitted twice, adding a dma_fence_get
+		 * to make sure fence is balanced */
+		dma_fence_get(s_job->s_fence->parent);
 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
 
 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
@@ -4690,6 +4896,7 @@ retry:
 
 		/* got the hw fence, signal finished fence */
 		atomic_dec(ring->sched.score);
+		dma_fence_put(s_job->s_fence->parent);
 		dma_fence_get(&s_job->s_fence->finished);
 		dma_fence_signal(&s_job->s_fence->finished);
 		dma_fence_put(&s_job->s_fence->finished);
@@ -4760,7 +4967,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
 			  job ? job->base.id : -1, hive->hive_id);
 		amdgpu_put_xgmi_hive(hive);
-		if (job)
+		if (job && job->vm)
 			drm_sched_increase_karma(&job->base);
 		return 0;
 	}
@@ -4784,7 +4991,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 				job ? job->base.id : -1);
 
 		/* even we skipped this reset, still need to set the job to guilty */
-		if (job)
+		if (job && job->vm)
 			drm_sched_increase_karma(&job->base);
 		goto skip_recovery;
 	}
@@ -4884,7 +5091,7 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
 	/* Actual ASIC resets if needed.*/
-	/* TODO Implement XGMI hive reset logic for SRIOV */
+	/* Host driver will handle XGMI hive reset for SRIOV */
 	if (amdgpu_sriov_vf(adev)) {
 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
 		if (r)
@@ -4925,7 +5132,7 @@ skip_hw_reset:
 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
 		}
 
-		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
 		}
 
@@ -4937,6 +5144,8 @@ skip_hw_reset:
 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 		} else {
 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
+				DRM_WARN("smart shift update failed\n");
 		}
 	}
 
@@ -4944,7 +5153,7 @@ skip_sched_resume:
 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 		/* unlock kfd: SRIOV would do it separately */
 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
-	                amdgpu_amdkfd_post_reset(tmp_adev);
+			amdgpu_amdkfd_post_reset(tmp_adev);
 
 		/* kfd_post_reset will do nothing if kfd device is not initialized,
 		 * need to bring up kfd here if it's not be initialized before
@@ -5125,7 +5334,8 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
 		return -ENOTSUPP;
 
-	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
+	if (ras && adev->ras_enabled &&
+	    adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
 	return amdgpu_dpm_baco_enter(adev);
@@ -5144,9 +5354,14 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
+	if (ras && adev->ras_enabled &&
+	    adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
+	if (amdgpu_passthrough(adev) &&
+	    adev->nbio.funcs->clear_doorbell_interrupt)
+		adev->nbio.funcs->clear_doorbell_interrupt(adev);
+
 	return 0;
 }
 
@@ -5186,6 +5401,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
+	adev->pci_channel_state = state;
+
 	switch (state) {
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5290,9 +5507,9 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
 
-	adev->in_pci_err_recovery = true;
+	adev->no_hw_access = true;
 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
-	adev->in_pci_err_recovery = false;
+	adev->no_hw_access = false;
 	if (r)
 		goto out;
 
@@ -5328,6 +5545,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 
 	DRM_INFO("PCI error: resume callback!!\n");
 
+	/* Only continue execution for the case of pci_channel_io_frozen */
+	if (adev->pci_channel_state != pci_channel_io_frozen)
+		return;
+
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 
@@ -5387,4 +5608,31 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
 	return true;
 }
 
+void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+		struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+	if (adev->flags & AMD_IS_APU)
+		return;
+#endif
+	if (adev->gmc.xgmi.connected_to_cpu)
+		return;
+
+	if (ring && ring->funcs->emit_hdp_flush)
+		amdgpu_ring_emit_hdp_flush(ring);
+	else
+		amdgpu_asic_flush_hdp(adev, ring);
+}
+
+void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+		struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+	if (adev->flags & AMD_IS_APU)
+		return;
+#endif
+	if (adev->gmc.xgmi.connected_to_cpu)
+		return;
+
+	amdgpu_asic_invalidate_hdp(adev, ring);
+}