Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c	227
1 file changed, 195 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ab05121b9272..21adb1b6e5cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,29 @@
  */
 #include "amdgpu.h"
 
-#define MAX_KIQ_REG_WAIT	100000
+#define MAX_KIQ_REG_WAIT	5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
+#define MAX_KIQ_REG_TRY 20
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+
+	if (addr >= AMDGPU_VA_HOLE_START)
+		addr |= AMDGPU_VA_HOLE_END;
+
+	return addr;
+}
+
+bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
+{
+	/* By now all MMIO pages except mailbox are blocked */
+	/* if blocking is enabled in hypervisor. Choose the */
+	/* SCRATCH_REG0 to test. */
+	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
+}
 
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 {
@@ -39,16 +61,22 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
 	return 0;
 }
 
+void amdgpu_free_static_csa(struct amdgpu_device *adev) {
+	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
+			      &adev->virt.csa_vmid0_addr,
+			      NULL);
+}
+
 /*
  * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
+ * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
+ * submission of GFX should use this virtual address within META_DATA init
+ * package to support SRIOV gfx preemption.
  */
-
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo_va **bo_va)
 {
+	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
@@ -76,7 +104,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -85,7 +113,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
 
@@ -107,59 +135,102 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 	adev->enable_virtual_display = true;
 	adev->cg_flags = 0;
 	adev->pg_flags = 0;
-
-	mutex_init(&adev->virt.lock_reset);
 }
 
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
-	signed long r;
-	uint32_t val;
-	struct dma_fence *f;
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 	struct amdgpu_ring *ring = &kiq->ring;
 
 	BUG_ON(!ring->funcs->emit_rreg);
 
-	mutex_lock(&kiq->ring_mutex);
+	spin_lock_irqsave(&kiq->ring_lock, flags);
 	amdgpu_ring_alloc(ring, 32);
 	amdgpu_ring_emit_rreg(ring, reg);
-	amdgpu_fence_emit(ring, &f);
+	amdgpu_fence_emit_polling(ring, &seq);
 	amdgpu_ring_commit(ring);
-	mutex_unlock(&kiq->ring_mutex);
-
-	r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
-	dma_fence_put(f);
-	if (r < 1) {
DRM_ERROR("wait for kiq fence error: %ld.\n", r); - return ~0; + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_read; + + if (in_interrupt()) + might_sleep(); + + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); } - val = adev->wb.wb[adev->virt.reg_val_offs]; + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; - return val; + return adev->wb.wb[adev->virt.reg_val_offs]; + +failed_kiq_read: + pr_err("failed to read reg:%x\n", reg); + return ~0; } void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) { - signed long r; - struct dma_fence *f; + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *ring = &kiq->ring; BUG_ON(!ring->funcs->emit_wreg); - mutex_lock(&kiq->ring_mutex); + spin_lock_irqsave(&kiq->ring_lock, flags); amdgpu_ring_alloc(ring, 32); amdgpu_ring_emit_wreg(ring, reg, v); - amdgpu_fence_emit(ring, &f); + amdgpu_fence_emit_polling(ring, &seq); amdgpu_ring_commit(ring); - mutex_unlock(&kiq->ring_mutex); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_write; + + if (in_interrupt()) + might_sleep(); + + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_write; - r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT)); - if (r < 1) - DRM_ERROR("wait for kiq fence error: %ld.\n", r); - dma_fence_put(f); + return; + +failed_kiq_write: + pr_err("failed to write reg:%x\n", reg); } /** @@ -230,6 +301,22 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev) } /** + * amdgpu_virt_wait_reset() - wait for reset gpu completed + * @amdgpu: amdgpu device. + * Wait for GPU reset completed. + * Return: Zero if reset success, otherwise will return error. + */ +int amdgpu_virt_wait_reset(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + + if (!virt->ops || !virt->ops->wait_reset) + return -EINVAL; + + return virt->ops->wait_reset(adev); +} + +/** * amdgpu_virt_alloc_mm_table() - alloc memory for mm table * @amdgpu: amdgpu device. 
 * MM table is used by UVD and VCE for its initialization
@@ -274,3 +361,79 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
 			      (void *)&adev->virt.mm_table.cpu_addr);
 	adev->virt.mm_table.gpu_addr = 0;
 }
+
+
+int amdgpu_virt_fw_reserve_get_checksum(void *obj,
+					unsigned long obj_size,
+					unsigned int key,
+					unsigned int chksum)
+{
+	unsigned int ret = key;
+	unsigned long i = 0;
+	unsigned char *pos;
+
+	pos = (char *)obj;
+	/* calculate checksum */
+	for (i = 0; i < obj_size; ++i)
+		ret += *(pos + i);
+	/* minus the chksum itself */
+	pos = (char *)&chksum;
+	for (i = 0; i < sizeof(chksum); ++i)
+		ret -= *(pos + i);
+	return ret;
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+	uint32_t pf2vf_size = 0;
+	uint32_t checksum = 0;
+	uint32_t checkval;
+	char *str;
+
+	adev->virt.fw_reserve.p_pf2vf = NULL;
+	adev->virt.fw_reserve.p_vf2pf = NULL;
+
+	if (adev->fw_vram_usage.va != NULL) {
+		adev->virt.fw_reserve.p_pf2vf =
+			(struct amdgim_pf2vf_info_header *)(
+			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
+		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
+
+		/* pf2vf message must be in 4K */
+		if (pf2vf_size > 0 && pf2vf_size < 4096) {
+			checkval = amdgpu_virt_fw_reserve_get_checksum(
+				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
+				adev->virt.fw_reserve.checksum_key, checksum);
+			if (checkval == checksum) {
+				adev->virt.fw_reserve.p_vf2pf =
+					((void *)adev->virt.fw_reserve.p_pf2vf +
+					pf2vf_size);
+				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
+					sizeof(amdgim_vf2pf_info));
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
+					AMDGPU_FW_VRAM_VF2PF_VER);
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
+					sizeof(amdgim_vf2pf_info));
+				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
+					&str);
+#ifdef MODULE
+				if (THIS_MODULE->version != NULL)
+					strcpy(str, THIS_MODULE->version);
+				else
+#endif
+					strcpy(str, "N/A");
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
+					0);
+				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
+					amdgpu_virt_fw_reserve_get_checksum(
+					adev->virt.fw_reserve.p_vf2pf,
+					pf2vf_size,
+					adev->virt.fw_reserve.checksum_key, 0));
+			}
+		}
+	}
+}
+
+