Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 95 +++++++++++++++++++++++++++++++++++++++++------------------------------------------------------
 1 file changed, 41 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d0d0ea565e3d..d8f1335bc68f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- /* One for TTM and one for the CS job */
- p->uf_entry.tv.num_shared = 2;
+ /* One for TTM and two for the CS job */
+ p->uf_entry.tv.num_shared = 3;
drm_gem_object_put(gobj);
@@ -116,7 +116,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
int ret;
if (cs->in.num_chunks == 0)
- return 0;
+ return -EINVAL;
chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ mutex_lock(&p->bo_list->bo_list_mutex);
+
/* One for TTM and one for the CS job */
amdgpu_bo_list_for_each_entry(e, p->bo_list)
e->tv.num_shared = 2;
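Note: the mutex_lock() added above has no matching unlock in this hunk; the pairing is cross-function, and the later hunks complete it (the parser_bos error unwind, the amdgpu_cs_vm_handling error paths, amdgpu_cs_parser_fini, and the success path at the end of amdgpu_cs_submit). A minimal compilable sketch of that split ownership, using pthreads and hypothetical stand-ins rather than the real amdgpu code:

	#include <pthread.h>

	struct cs { pthread_mutex_t bo_list_mutex; };

	/* Hypothetical stand-ins; only the lock handoff mirrors the patch. */
	static int parse_bos(struct cs *c, int fail)
	{
		pthread_mutex_lock(&c->bo_list_mutex);    /* this hunk */
		if (fail) {
			pthread_mutex_unlock(&c->bo_list_mutex); /* error unwind */
			return -1;
		}
		return 0;  /* success: still locked, handed to the submit step */
	}

	static void submit(struct cs *c)
	{
		/* ... push the job to the scheduler ... */
		pthread_mutex_unlock(&c->bo_list_mutex);  /* end of cs_submit() */
	}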
@@ -545,14 +547,15 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
GFP_KERNEL | __GFP_ZERO);
if (!e->user_pages) {
DRM_ERROR("kvmalloc_array failure\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto out_free_user_pages;
}
r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
- return r;
+ goto out_free_user_pages;
}
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
@@ -569,21 +572,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
- goto out;
+ goto out_free_user_pages;
}
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
- if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
- e->chain = dma_fence_chain_alloc();
- if (!e->chain) {
- r = -ENOMEM;
- goto error_validate;
- }
- }
}
/* Move fence waiting after getting reservation lock of
@@ -644,14 +639,22 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
+ if (r)
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+out_free_user_pages:
if (r) {
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- dma_fence_chain_free(e->chain);
- e->chain = NULL;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ if (!e->user_pages)
+ continue;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
}
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+ mutex_unlock(&p->bo_list->bo_list_mutex);
}
-out:
return r;
}
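The reworked exit path above replaces the ad-hoc returns with the usual kernel goto-cleanup ladder: back off the TTM reservation first, then release the pinned user pages, then drop bo_list_mutex, i.e. strictly the reverse of the acquisition order. A self-contained sketch of the same shape (hypothetical helper names, not the amdgpu code; pthreads stand in for the kernel mutex):

	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	struct ctx {
		pthread_mutex_t lock;
		void *pages;
	};

	/* Toy stand-ins for the acquisition steps; only the unwind
	 * shape mirrors the patch. */
	static int get_pages(struct ctx *c)
	{
		c->pages = malloc(64);
		return c->pages ? 0 : -ENOMEM;
	}
	static int reserve_buffers(struct ctx *c) { (void)c; return 0; }
	static void backoff_reservation(struct ctx *c) { (void)c; }
	static int validate(struct ctx *c) { (void)c; return 0; }

	int parser_bos(struct ctx *c)
	{
		int r;

		pthread_mutex_lock(&c->lock);   /* 1: bo_list_mutex       */

		r = get_pages(c);               /* 2: pin user pages      */
		if (r)
			goto out_unlock;

		r = reserve_buffers(c);         /* 3: ttm_eu_reserve_...  */
		if (r)
			goto out_put_pages;

		r = validate(c);
		if (r)
			goto out_backoff;

		return 0;  /* success: the lock stays held until submit/fini */

	out_backoff:
		backoff_reservation(c);         /* undo 3 */
	out_put_pages:
		free(c->pages);                 /* undo 2 */
		c->pages = NULL;
	out_unlock:
		pthread_mutex_unlock(&c->lock); /* undo 1 */
		return r;
	}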
@@ -691,15 +694,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
unsigned i;
if (error && backoff) {
- struct amdgpu_bo_list_entry *e;
-
- amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
- dma_fence_chain_free(e->chain);
- e->chain = NULL;
- }
-
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
+ mutex_unlock(&parser->bo_list->bo_list_mutex);
}
for (i = 0; i < parser->num_post_deps; i++) {
@@ -809,22 +806,22 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -839,13 +836,17 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
- if (r)
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r) {
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
+ }
- r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
- if (r)
+ r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+ if (r) {
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
+ }
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -856,7 +857,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
if (r)
return r;
@@ -1260,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
p->fence = dma_fence_get(&job->base.s_fence->finished);
- amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
+ seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
amdgpu_cs_post_dependencies(p);
if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
@@ -1280,27 +1281,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct dma_resv *resv = e->tv.bo->base.resv;
- struct dma_fence_chain *chain = e->chain;
-
- if (!chain)
- continue;
-
- /*
- * Work around dma_resv shortcomings by wrapping up the
- * submission in a dma_fence_chain and add it as exclusive
- * fence.
- */
- dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
- dma_fence_get(p->fence), 1);
-
- rcu_assign_pointer(resv->fence_excl, &chain->base);
- e->chain = NULL;
- }
+ /* Make sure all BOs are remembered as writers */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 0;
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return 0;
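Why clearing num_shared is enough to mark the BOs as written: in this kernel generation, ttm_eu_fence_buffer_objects() adds the CS fence as a shared (read) fence when an entry's num_shared is non-zero and as the exclusive (write) fence when it is zero, so zeroing the field attaches an exclusive fence to every BO without the per-BO dma_fence_chain the deleted block used to splice in. Roughly, paraphrased from drivers/gpu/drm/ttm/ttm_execbuf_util.c of the same era (not a verbatim copy):

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* num_shared == 0 means "this submission writes the BO" */
		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		dma_resv_unlock(bo->base.resv);
	}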