Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/nv.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/nv.c	71
1 file changed, 23 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 4d1402356262..03462c857498 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -69,75 +69,40 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
  */
 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
-	unsigned long flags, address, data;
-	u32 r;
+	unsigned long address, data;
 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	WREG32(address, reg);
-	(void)RREG32(address);
-	r = RREG32(data);
-	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-	return r;
+	return amdgpu_device_indirect_rreg(adev, address, data, reg);
 }
 
 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-	unsigned long flags, address, data;
+	unsigned long address, data;
 
 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	WREG32(address, reg);
-	(void)RREG32(address);
-	WREG32(data, v);
-	(void)RREG32(data);
-	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
 }
 
 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
-	unsigned long flags, address, data;
-	u64 r;
+	unsigned long address, data;
 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	/* read low 32 bit */
-	WREG32(address, reg);
-	(void)RREG32(address);
-	r = RREG32(data);
-
-	/* read high 32 bit*/
-	WREG32(address, reg + 4);
-	(void)RREG32(address);
-	r |= ((u64)RREG32(data) << 32);
-	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-	return r;
+	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
 }
 
 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
-	unsigned long flags, address, data;
+	unsigned long address, data;
 
 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-	/* write low 32 bit */
-	WREG32(address, reg);
-	(void)RREG32(address);
-	WREG32(data, (u32)(v & 0xffffffffULL));
-	(void)RREG32(data);
-
-	/* write high 32 bit */
-	WREG32(address, reg + 4);
-	(void)RREG32(address);
-	WREG32(data, (u32)(v >> 32));
-	(void)RREG32(data);
-	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
 }
 
 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
@@ -311,7 +276,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 	/* disable BM */
 	pci_clear_master(adev->pdev);
 
-	pci_save_state(adev->pdev);
+	amdgpu_device_cache_pci_state(adev->pdev);
 
 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
 		dev_info(adev->dev, "GPU smu mode1 reset\n");
@@ -323,7 +288,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
 	if (ret)
 		dev_err(adev->dev, "GPU mode1 reset failed\n");
 
-	pci_restore_state(adev->pdev);
+	amdgpu_device_load_pci_state(adev->pdev);
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
@@ -490,6 +455,14 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
 	adev->virt.ops = &xgpu_nv_virt_ops;
 }
 
+static bool nv_is_blockchain_sku(struct pci_dev *pdev)
+{
+	if (pdev->device == 0x731E &&
+	    (pdev->revision == 0xC6 || pdev->revision == 0xC7))
+		return true;
+	return false;
+}
+
 int nv_set_ip_blocks(struct amdgpu_device *adev)
 {
 	int r;
@@ -518,7 +491,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
-		else if (amdgpu_device_has_dc_support(adev))
+		else if (amdgpu_device_has_dc_support(adev) &&
+			 !nv_is_blockchain_sku(adev->pdev))
 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
@@ -526,7 +500,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
 		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+		if (!nv_is_blockchain_sku(adev->pdev))
+			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		if (adev->enable_mes)
 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
@@ -621,7 +596,7 @@ static void nv_invalidate_hdp(struct amdgpu_device *adev,
 				struct amdgpu_ring *ring)
 {
 	if (!ring || !ring->funcs->emit_wreg) {
-		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
 	} else {
 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
 			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
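
Note on the first hunk: the four nv_pcie_* accessors stop open-coding the PCIe index/data sequence and become one-line wrappers around the shared amdgpu_device_indirect_rreg()/amdgpu_device_indirect_wreg() helpers (and their 64-bit variants), which presumably live in amdgpu_device.c. The sketch below is reconstructed only from the sequence deleted in this diff to show what such a helper centralizes; the name sketch_indirect_rreg is a placeholder, and the real implementation may differ in detail.

/* Sketch only: rebuilt from the open-coded read sequence removed above,
 * not the actual amdgpu_device.c implementation.
 */
static u32 sketch_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data, u32 reg_addr)
{
	unsigned long flags;
	u32 r;

	/* serialize all users of the shared index/data register pair */
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(pcie_index, reg_addr);	/* select the indirect register */
	(void)RREG32(pcie_index);	/* posting read to flush the index write */
	r = RREG32(pcie_data);		/* fetch the selected register's value */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

Pulling the lock/select/flush/access sequence into common helpers keeps the posting-read behaviour identical across the ASIC files instead of each one duplicating it, which is what lets the accessors above shrink as shown in the diff.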