Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 69
1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 659997bfff30..311589e02d17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- if (vm && !job->vm_id) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
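For context, the guard in this hunk rejects VM submissions that never acquired a VMID; only the field name changes (vm_id -> vmid). A minimal standalone model of the rule, with every name except the vmid field invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct job {
        unsigned int vmid;              /* 0 means no VMID was assigned */
    };

    /* A VM IB must carry a VMID; reject it otherwise. */
    static bool vm_ib_ok(bool has_vm, const struct job *job)
    {
        return !has_vm || (job && job->vmid != 0);
    }

    int main(void)
    {
        struct job j = { .vmid = 0 };
        if (!vm_ib_ok(true, &j))
            fprintf(stderr, "VM IB without ID\n");
        return 0;
    }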
@@ -164,7 +164,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if (ring->funcs->emit_pipeline_sync && job &&
- ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+ ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
dma_fence_put(tmp);
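Here only the signature changed: amdgpu_sync_get_fence() gained a second argument (upstream uses it to report whether the fence came from an explicit dependency; this caller passes NULL because it only cares that a fence is pending). A plain-C model of the pattern, with everything below being a stand-in rather than the real API:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct fence { unsigned int refs; };

    static void fence_put(struct fence *f)
    {
        if (f)
            f->refs--;
    }

    /* Stand-in for amdgpu_sync_get_fence(): returns a pending fence, and
     * optionally reports a flag through the new out-parameter. */
    static struct fence *sync_get_fence(struct fence *pending, bool *explicit)
    {
        if (explicit)
            *explicit = false;
        return pending;
    }

    int main(void)
    {
        struct fence f = { .refs = 1 };
        struct fence *tmp;
        bool need_pipe_sync = false;

        /* Only the presence of the fence matters, so drop it at once. */
        if ((tmp = sync_get_fence(&f, NULL)) != NULL) {
            need_pipe_sync = true;
            fence_put(tmp);
        }
        printf("need_pipe_sync = %d\n", need_pipe_sync);
        return 0;
    }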
@@ -181,15 +181,18 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
}
- if (ring->funcs->init_cond_exec)
+ if (job && ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
- if (ring->funcs->emit_hdp_flush
#ifdef CONFIG_X86_64
- && !(adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
#endif
- )
- amdgpu_ring_emit_hdp_flush(ring);
+ {
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ else
+ amdgpu_asic_flush_hdp(adev, ring);
+ }
skip_preamble = ring->current_ctx == fence_ctx;
need_ctx_switch = ring->current_ctx != fence_ctx;
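The rewritten flush logic above reads as a decision ladder: x86-64 APUs skip HDP maintenance entirely (the check is compiled out on other architectures), and everyone else uses the ring's own flush packet when the engine provides one, falling back to the ASIC-level callback. A compilable sketch of that dispatch; amdgpu_ring_emit_hdp_flush and amdgpu_asic_flush_hdp are the real callees from the hunk, all other names are invented:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ring_funcs {
        void (*emit_hdp_flush)(void);   /* NULL if the engine has no packet */
    };

    static void ring_packet_flush(void) { puts("ring emit_hdp_flush"); }
    static void asic_flush_hdp(void)    { puts("asic flush_hdp fallback"); }

    /* Mirrors the patched flow: no HDP flush on APUs, ring packet when
     * available, ASIC callback otherwise. */
    static void flush_hdp(bool is_apu, const struct ring_funcs *funcs)
    {
        if (is_apu)
            return;
        if (funcs->emit_hdp_flush)
            funcs->emit_hdp_flush();
        else
            asic_flush_hdp();
    }

    int main(void)
    {
        struct ring_funcs gfx = { .emit_hdp_flush = ring_packet_flush };
        struct ring_funcs uvd = { .emit_hdp_flush = NULL };

        flush_hdp(false, &gfx);   /* ring packet */
        flush_hdp(false, &uvd);   /* ASIC fallback */
        flush_hdp(true, &uvd);    /* skipped entirely */
        return 0;
    }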
@@ -211,7 +214,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+ amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
need_ctx_switch);
need_ctx_switch = false;
}
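For context on the loop above: preamble IBs are skipped when the ring is still on the same context, except under SR-IOV, where the preamble CE IB must always be emitted so preemption can restore state. The skip predicate boils down to the sketch below (the flag name mimics the uapi constant but should be treated as illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define IB_FLAG_PREAMBLE 0x1    /* models AMDGPU_IB_FLAG_PREAMBLE */

    /* A preamble IB may be dropped only when the previous submission used
     * the same context and we are not an SR-IOV virtual function. */
    static bool can_skip_ib(unsigned int ib_flags, bool same_ctx, bool sriov_vf)
    {
        return (ib_flags & IB_FLAG_PREAMBLE) && same_ctx && !sriov_vf;
    }

    int main(void)
    {
        printf("%d\n", can_skip_ib(IB_FLAG_PREAMBLE, true, false));  /* 1 */
        printf("%d\n", can_skip_ib(IB_FLAG_PREAMBLE, true, true));   /* 0 */
        return 0;
    }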
@@ -219,19 +222,16 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_tmz)
amdgpu_ring_emit_tmz(ring, false);
- if (ring->funcs->emit_hdp_invalidate
#ifdef CONFIG_X86_64
- && !(adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
#endif
- )
- amdgpu_ring_emit_hdp_invalidate(ring);
+ amdgpu_asic_invalidate_hdp(adev, ring);
r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
- if (job && job->vm_id)
- amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
- job->vm_id);
+ if (job && job->vmid)
+ amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
amdgpu_ring_undo(ring);
return r;
}
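The unwind order in the error path above is worth spelling out: on fence-emit failure, first release the VMID reserved for the job (now via amdgpu_vmid_reset()), then rewind the ring so none of the packets emitted earlier reach the hardware. A standalone model of that ordering, with every name other than the vmid-reset role invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { unsigned int wptr, saved_wptr; };

    static void vmid_reset(unsigned int vmid) { printf("reset vmid %u\n", vmid); }
    static void ring_undo(struct ring *r)     { r->wptr = r->saved_wptr; }

    /* On failure: drop the VMID reservation, then roll back the write
     * pointer so the partially built submission is discarded. */
    static int submit(struct ring *r, unsigned int vmid, bool fence_ok)
    {
        r->saved_wptr = r->wptr;
        r->wptr += 16;                  /* pretend packets were emitted */
        if (!fence_ok) {
            if (vmid)
                vmid_reset(vmid);
            ring_undo(r);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct ring r = { .wptr = 64 };
        submit(&r, 3, false);
        printf("wptr back to %u\n", r.wptr);    /* prints 64 */
        return 0;
    }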
@@ -279,11 +279,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
return r;
}
- r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
- if (r) {
- return r;
- }
-
adev->ib_pool_ready = true;
if (amdgpu_debugfs_sa_init(adev)) {
dev_err(adev->dev, "failed to register debugfs file for SA\n");
@@ -302,7 +297,6 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
if (adev->ib_pool_ready) {
- amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
adev->ib_pool_ready = false;
}
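Taken together, the two hunks above reduce the suballocator lifecycle to a plain init/fini pair bracketing ib_pool_ready; the separate start and suspend steps are gone. A sketch of the resulting state machine, all names illustrative:

    #include <stdbool.h>

    struct dev { bool ib_pool_ready; };

    static int sa_manager_init(struct dev *d)  { (void)d; return 0; }
    static void sa_manager_fini(struct dev *d) { (void)d; }

    /* After the patch: init either fully succeeds and marks the pool
     * ready, or fails before the flag is set; no intermediate "started"
     * or "suspended" states remain. */
    static int ib_pool_init(struct dev *d)
    {
        int r = sa_manager_init(d);
        if (r)
            return r;
        d->ib_pool_ready = true;
        return 0;
    }

    static void ib_pool_fini(struct dev *d)
    {
        if (d->ib_pool_ready) {
            sa_manager_fini(d);
            d->ib_pool_ready = false;
        }
    }

    int main(void)
    {
        struct dev d = { .ib_pool_ready = false };
        if (ib_pool_init(&d) == 0)
            ib_pool_fini(&d);
        return 0;
    }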
@@ -322,14 +316,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
int r, ret = 0;
+ long tmo_gfx, tmo_mm;
+
+ tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
+ if (amdgpu_sriov_vf(adev)) {
+ /* On the hypervisor side, MM engines are not scheduled together
+ * with the CP and SDMA engines, so even in exclusive mode an MM
+ * engine may still be serving another VF. Widen the MM IB test
+ * timeout under SR-IOV; 8 seconds should be enough for the
+ * engine to come back to this VF.
+ */
+ tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ }
+
+ if (amdgpu_sriov_runtime(adev)) {
+ /* CP and SDMA engines are scheduled together, so widen the
+ * timeout enough to cover the wait for them to come back,
+ * but only under SR-IOV runtime mode.
+ */
+ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+ long tmo;
if (!ring || !ring->ready)
continue;
- r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
+ /* MM engines need more time */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ tmo = tmo_mm;
+ else
+ tmo = tmo_gfx;
+
+ r = amdgpu_ring_test_ib(ring, tmo);
if (r) {
ring->ready = false;
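The per-ring selection in this last hunk condenses to: multimedia rings take tmo_mm, everything else tmo_gfx, and each base timeout is widened eightfold under the matching SR-IOV condition. A compilable condensation (ring-type names follow the hunk; the numeric timeout is a stand-in for AMDGPU_IB_TEST_TIMEOUT):

    #include <stdbool.h>
    #include <stdio.h>

    #define IB_TEST_TIMEOUT 100      /* stand-in for AMDGPU_IB_TEST_TIMEOUT */

    enum ring_type { RING_GFX, RING_SDMA, RING_UVD, RING_VCE,
                     RING_UVD_ENC, RING_VCN_DEC, RING_VCN_ENC };

    static bool is_mm_ring(enum ring_type t)
    {
        return t == RING_UVD || t == RING_VCE || t == RING_UVD_ENC ||
               t == RING_VCN_DEC || t == RING_VCN_ENC;
    }

    /* MM engines serving another VF may hold the hardware even in
     * exclusive mode, so their tests widen under plain SR-IOV; CP/SDMA
     * only widen under SR-IOV runtime mode. */
    static long ib_test_timeout(enum ring_type t, bool sriov_vf,
                                bool sriov_runtime)
    {
        long tmo_mm  = sriov_vf      ? 8 * IB_TEST_TIMEOUT : IB_TEST_TIMEOUT;
        long tmo_gfx = sriov_runtime ? 8 * IB_TEST_TIMEOUT : IB_TEST_TIMEOUT;

        return is_mm_ring(t) ? tmo_mm : tmo_gfx;
    }

    int main(void)
    {
        printf("%ld\n", ib_test_timeout(RING_UVD, true, false));   /* 800 */
        printf("%ld\n", ib_test_timeout(RING_GFX, true, false));   /* 100 */
        return 0;
    }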