Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 267
1 file changed, 134 insertions(+), 133 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2eb2c66843a8..49dd9aa8da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -65,6 +65,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 	}
 
 	amdgpu_sync_create(&p->sync);
+	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
 	return 0;
 }
 
@@ -112,6 +113,9 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
 	if (r < 0)
 		return r;
 
+	if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
+		return -EINVAL;
+
 	++(num_ibs[r]);
 	p->gang_leader_idx = r;
 	return 0;
@@ -122,7 +126,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 				   uint32_t *offset)
 {
 	struct drm_gem_object *gobj;
-	struct amdgpu_bo *bo;
 	unsigned long size;
 	int r;
 
@@ -130,21 +133,16 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 	if (gobj == NULL)
 		return -EINVAL;
 
-	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-	p->uf_entry.priority = 0;
-	p->uf_entry.tv.bo = &bo->tbo;
-	/* One for TTM and two for the CS job */
-	p->uf_entry.tv.num_shared = 3;
-
+	p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	drm_gem_object_put(gobj);
 
-	size = amdgpu_bo_size(bo);
+	size = amdgpu_bo_size(p->uf_bo);
 	if (size != PAGE_SIZE || (data->offset + 8) > size) {
 		r = -EINVAL;
 		goto error_unref;
 	}
 
-	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+	if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
 		r = -EINVAL;
 		goto error_unref;
 	}
@@ -154,7 +152,7 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_unref:
-	amdgpu_bo_unref(&bo);
+	amdgpu_bo_unref(&p->uf_bo);
 	return r;
 }
 
@@ -192,7 +190,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
 	uint32_t uf_offset = 0;
-	unsigned int size;
+	size_t size;
 	int ret;
 	int i;
 
@@ -285,6 +283,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
 			break;
 
 		default:
@@ -294,7 +293,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 
 	if (!p->gang_size) {
 		ret = -EINVAL;
-		goto free_partial_kdata;
+		goto free_all_kdata;
 	}
 
 	for (i = 0; i < p->gang_size; ++i) {
@@ -305,12 +304,12 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 	}
 	p->gang_leader = p->jobs[p->gang_leader_idx];
 
-	if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
+	if (p->ctx->generation != p->gang_leader->generation) {
 		ret = -ECANCELED;
 		goto free_all_kdata;
 	}
 
-	if (p->uf_entry.tv.bo)
+	if (p->uf_bo)
 		p->gang_leader->uf_addr = uf_offset;
 	kvfree(chunk_array);
 
@@ -355,7 +354,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
 	ib = &job->ibs[job->num_ibs++];
 
 	/* MM engine doesn't support user fences */
-	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
+	if (p->uf_bo && ring->funcs->no_user_fence)
 		return -EINVAL;
 
 	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
@@ -393,7 +392,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 {
 	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	unsigned num_deps;
+	unsigned int num_deps;
 	int i, r;
 
 	num_deps = chunk->length_dw * 4 /
@@ -464,7 +463,7 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
 				   struct amdgpu_cs_chunk *chunk)
 {
 	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
-	unsigned num_deps;
+	unsigned int num_deps;
 	int i, r;
 
 	num_deps = chunk->length_dw * 4 /
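
The hunk in amdgpu_cs_parser_init() above is the entry point of this patch's main change: the parser now carries a drm_exec context instead of a ttm_eu ticket and validated list, created here and (as the later hunks show) torn down in amdgpu_cs_parser_fini(). A minimal sketch of the drm_exec locking pattern the parser adopts; my_lock_bos(), bos and num_bos are illustrative stand-ins, not part of the patch:

```c
#include <drm/drm_exec.h>

/* Hypothetical helper showing the drm_exec pattern used by this patch. */
static int my_lock_bos(struct drm_gem_object **bos, unsigned int num_bos,
		       unsigned int num_fences)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	/* Two-argument form, as used by the patch */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < num_bos; ++i) {
			/* Lock the BO and reserve num_fences dma_resv slots */
			ret = drm_exec_prepare_obj(&exec, bos[i], num_fences);
			/* On contention: unlock everything, restart the loop */
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}

	/* ... all objects are locked here; add fences, submit work ... */
out:
	drm_exec_fini(&exec);
	return ret;
}
```

drm_exec_retry_on_contention() restarts the whole drm_exec_until_all_locked() block after backing off, which is what makes the explicit duplicate handling and backoff paths of the old ttm_eu code unnecessary.
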
@@ -482,7 +481,7 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
 					      struct amdgpu_cs_chunk *chunk)
 {
 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
-	unsigned num_deps;
+	unsigned int num_deps;
 	int i, r;
 
 	num_deps = chunk->length_dw * 4 /
@@ -502,7 +501,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
 				    struct amdgpu_cs_chunk *chunk)
 {
 	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
-	unsigned num_deps;
+	unsigned int num_deps;
 	int i;
 
 	num_deps = chunk->length_dw * 4 /
@@ -536,7 +535,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 						struct amdgpu_cs_chunk *chunk)
 {
 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
-	unsigned num_deps;
+	unsigned int num_deps;
 	int i;
 
 	num_deps = chunk->length_dw * 4 /
@@ -575,6 +574,26 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 	return 0;
 }
 
+static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
+			       struct amdgpu_cs_chunk *chunk)
+{
+	struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
+	int i;
+
+	if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
+		return -EINVAL;
+
+	for (i = 0; i < p->gang_size; ++i) {
+		p->jobs[i]->shadow_va = shadow->shadow_va;
+		p->jobs[i]->csa_va = shadow->csa_va;
+		p->jobs[i]->gds_va = shadow->gds_va;
+		p->jobs[i]->init_shadow =
+			shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
+	}
+
+	return 0;
+}
+
 static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 {
 	unsigned int ce_preempt = 0, de_preempt = 0;
@@ -617,6 +636,11 @@ static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 			if (r)
 				return r;
 			break;
+		case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
+			r = amdgpu_cs_p2_shadow(p, chunk);
+			if (r)
+				return r;
+			break;
 		}
 	}
 
@@ -729,6 +753,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 
 	if (used_vis_vram < total_vis_vram) {
 		u64 free_vis_vram = total_vis_vram - used_vis_vram;
+
 		adev->mm_stats.accum_us_vis =
 			min(adev->mm_stats.accum_us_vis + increment_us,
 			    us_upper_bound);
@@ -814,55 +839,18 @@ retry:
 	return r;
 }
 
-static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
-				   struct list_head *validated)
-{
-	struct ttm_operation_ctx ctx = { true, false };
-	struct amdgpu_bo_list_entry *lobj;
-	int r;
-
-	list_for_each_entry(lobj, validated, tv.head) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
-		struct mm_struct *usermm;
-
-		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
-		if (usermm && usermm != current->mm)
-			return -EPERM;
-
-		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
-		    lobj->user_invalidated && lobj->user_pages) {
-			amdgpu_bo_placement_from_domain(bo,
-							AMDGPU_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-			if (r)
-				return r;
-
-			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
-						     lobj->user_pages);
-		}
-
-		r = amdgpu_cs_bo_validate(p, bo);
-		if (r)
-			return r;
-
-		kvfree(lobj->user_pages);
-		lobj->user_pages = NULL;
-	}
-	return 0;
-}
-
 static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 				union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *e;
-	struct list_head duplicates;
+	struct drm_gem_object *obj;
+	unsigned long index;
 	unsigned int i;
 	int r;
 
-	INIT_LIST_HEAD(&p->validated);
-
 	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 	if (cs->in.bo_list_handle) {
 		if (p->bo_list)
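
The new amdgpu_cs_p2_shadow() above consumes the AMDGPU_CHUNK_ID_CP_GFX_SHADOW chunk added to the UAPI for CP shadow buffer support. A hedged userspace-side sketch of filling such a chunk; fill_shadow_chunk() and the VA values are placeholders, not libdrm API:

```c
#include <stdint.h>
#include <drm/amdgpu_drm.h>	/* UAPI header as shipped by libdrm */

/* Hypothetical helper; the GPU virtual addresses are placeholders. */
static void fill_shadow_chunk(struct drm_amdgpu_cs_chunk *chunk,
			      struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow)
{
	shadow->shadow_va = 0x400000;
	shadow->csa_va = 0x401000;
	shadow->gds_va = 0x402000;
	/* Any other flag bit makes amdgpu_cs_p2_shadow() return -EINVAL */
	shadow->flags = AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;

	chunk->chunk_id = AMDGPU_CHUNK_ID_CP_GFX_SHADOW;
	chunk->length_dw = sizeof(*shadow) / 4;
	chunk->chunk_data = (uintptr_t)shadow;
}
```

Note that the kernel side applies the same shadow, CSA and GDS addresses to every job in the gang.
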
@@ -882,25 +870,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	mutex_lock(&p->bo_list->bo_list_mutex);
 
-	/* One for TTM and one for the CS job */
-	amdgpu_bo_list_for_each_entry(e, p->bo_list)
-		e->tv.num_shared = 2;
-
-	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
-
-	INIT_LIST_HEAD(&duplicates);
-	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
-
-	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
-		list_add(&p->uf_entry.tv.head, &p->validated);
-
 	/* Get userptr backing pages. If pages are updated after registered
 	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
 	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
 	 */
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 		bool userpage_invalidated = false;
+		struct amdgpu_bo *bo = e->bo;
 		int i;
 
 		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
@@ -928,18 +904,56 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		e->user_invalidated = userpage_invalidated;
 	}
 
-	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-				   &duplicates);
-	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS)
-			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
-		goto out_free_user_pages;
+	drm_exec_until_all_locked(&p->exec) {
+		r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
+		drm_exec_retry_on_contention(&p->exec);
+		if (unlikely(r))
+			goto out_free_user_pages;
+
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+			/* One fence for TTM and one for each CS job */
+			r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
+						 1 + p->gang_size);
+			drm_exec_retry_on_contention(&p->exec);
+			if (unlikely(r))
+				goto out_free_user_pages;
+
+			e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
+		}
+
+		if (p->uf_bo) {
+			r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
+						 1 + p->gang_size);
+			drm_exec_retry_on_contention(&p->exec);
+			if (unlikely(r))
+				goto out_free_user_pages;
+		}
 	}
 
-	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+		struct mm_struct *usermm;
+
+		usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
+		if (usermm && usermm != current->mm) {
+			r = -EPERM;
+			goto out_free_user_pages;
+		}
 
-		e->bo_va = amdgpu_vm_bo_find(vm, bo);
+		if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
+		    e->user_invalidated && e->user_pages) {
+			amdgpu_bo_placement_from_domain(e->bo,
+							AMDGPU_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
+					    &ctx);
+			if (r)
+				goto out_free_user_pages;
+
+			amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
						     e->user_pages);
+		}
+
+		kvfree(e->user_pages);
+		e->user_pages = NULL;
 	}
 
 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
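
The converted loop above passes "1 + p->gang_size" as the fence count to drm_exec_prepare_obj(), which locks the object and reserves that many dma_resv slots in one call. That up-front reservation is why the old ttm_eu_reserve_buffers() call and the e->tv.num_shared bookkeeping can go away. A sketch of the underlying contract, with reserve_submit_slots() as an illustrative name:

```c
#include <linux/dma-resv.h>

/*
 * Illustrative only: slots must be reserved while the BO is locked so that
 * the dma_resv_add_fence() calls at submit time cannot fail with -ENOMEM.
 */
static int reserve_submit_slots(struct dma_resv *resv, unsigned int gang_size)
{
	/* one fence slot for TTM and one for each CS job in the gang */
	return dma_resv_reserve_fences(resv, 1 + gang_size);
}
```
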
@@ -951,25 +965,21 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 				      amdgpu_cs_bo_validate, p);
 	if (r) {
 		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
-		goto error_validate;
+		goto out_free_user_pages;
 	}
 
-	r = amdgpu_cs_list_validate(p, &duplicates);
-	if (r)
-		goto error_validate;
-
-	r = amdgpu_cs_list_validate(p, &p->validated);
-	if (r)
-		goto error_validate;
-
-	if (p->uf_entry.tv.bo) {
-		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+	drm_exec_for_each_locked_object(&p->exec, index, obj) {
+		r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
+		if (unlikely(r))
+			goto out_free_user_pages;
+	}
 
-		r = amdgpu_ttm_alloc_gart(&uf->tbo);
-		if (r)
-			goto error_validate;
+	if (p->uf_bo) {
+		r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
+		if (unlikely(r))
+			goto out_free_user_pages;
 
-		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
+		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
 	}
 
 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
@@ -981,12 +991,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			     p->bo_list->oa_obj);
 	return 0;
 
-error_validate:
-	ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
 out_free_user_pages:
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+		struct amdgpu_bo *bo = e->bo;
 
 		if (!e->user_pages)
 			continue;
@@ -1047,9 +1054,8 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
 
 	/* the IB should be reserved at this point */
 	r = amdgpu_bo_kmap(aobj, (void **)&kptr);
-	if (r) {
+	if (r)
 		return r;
-	}
 
 	kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
@@ -1093,7 +1099,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_bo_va *bo_va;
-	struct amdgpu_bo *bo;
 	unsigned int i;
 	int r;
 
@@ -1122,11 +1127,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	}
 
 	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-		/* ignore duplicates */
-		bo = ttm_to_amdgpu_bo(e->tv.bo);
-		if (!bo)
-			continue;
-
 		bo_va = e->bo_va;
 		if (bo_va == NULL)
 			continue;
@@ -1164,7 +1164,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (amdgpu_vm_debug) {
 		/* Invalidate all BOs to test for userspace bugs */
 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
-			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+			struct amdgpu_bo *bo = e->bo;
 
 			/* ignore duplicates */
 			if (!bo)
@@ -1181,8 +1181,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct drm_gpu_scheduler *sched;
-	struct amdgpu_bo_list_entry *e;
+	struct drm_gem_object *obj;
 	struct dma_fence *fence;
+	unsigned long index;
 	unsigned int i;
 	int r;
 
@@ -1193,8 +1194,9 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 		return r;
 	}
 
-	list_for_each_entry(e, &p->validated, tv.head) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+	drm_exec_for_each_locked_object(&p->exec, index, obj) {
+		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
 		struct dma_resv *resv = bo->tbo.base.resv;
 		enum amdgpu_sync_mode sync_mode;
@@ -1258,6 +1260,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_job *leader = p->gang_leader;
 	struct amdgpu_bo_list_entry *e;
+	struct drm_gem_object *gobj;
+	unsigned long index;
 	unsigned int i;
 	uint64_t seq;
 	int r;
@@ -1296,9 +1300,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	 */
 	r = 0;
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-
-		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+		r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
+							e->range);
 		e->range = NULL;
 	}
 	if (r) {
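
With the validated list gone, every walk over the reserved buffers becomes a drm_exec_for_each_locked_object() iteration, as in the validation and amdgpu_cs_sync_rings() hunks above. A small sketch of the iterator; dump_locked_objects() is illustrative, not part of the patch:

```c
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <linux/printk.h>

/* Hypothetical helper: visit every GEM object locked via this drm_exec. */
static void dump_locked_objects(struct drm_exec *exec)
{
	struct drm_gem_object *obj;
	unsigned long index;

	drm_exec_for_each_locked_object(exec, index, obj)
		pr_debug("locked object %lu, %zu bytes\n", index, obj->size);
}
```
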
@@ -1308,20 +1311,22 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	}
 
 	p->fence = dma_fence_get(&leader->base.s_fence->finished);
-	list_for_each_entry(e, &p->validated, tv.head) {
+	drm_exec_for_each_locked_object(&p->exec, index, gobj) {
+
+		ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
 
 		/* Everybody except for the gang leader uses READ */
 		for (i = 0; i < p->gang_size; ++i) {
 			if (p->jobs[i] == leader)
 				continue;
 
-			dma_resv_add_fence(e->tv.bo->base.resv,
+			dma_resv_add_fence(gobj->resv,
 					   &p->jobs[i]->base.s_fence->finished,
 					   DMA_RESV_USAGE_READ);
 		}
 
-		/* The gang leader is remembered as writer */
-		e->tv.num_shared = 0;
+		/* The gang leader is remembered as writer */
+		dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
 	}
 
 	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
@@ -1337,7 +1342,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	cs->out.handle = seq;
 	leader->uf_sequence = seq;
 
-	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
 	for (i = 0; i < p->gang_size; ++i) {
 		amdgpu_job_free_resources(p->jobs[i]);
 		trace_amdgpu_cs_ioctl(p->jobs[i]);
@@ -1346,7 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	}
 
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
-	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
 
 	mutex_unlock(&p->adev->notifier_lock);
 	mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1356,9 +1360,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 /* Cleanup the parser structure */
 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 {
-	unsigned i;
+	unsigned int i;
 
 	amdgpu_sync_free(&parser->sync);
+	drm_exec_fini(&parser->exec);
+
 	for (i = 0; i < parser->num_post_deps; i++) {
 		drm_syncobj_put(parser->post_deps[i].syncobj);
 		kfree(parser->post_deps[i].chain);
@@ -1379,11 +1385,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 		if (parser->jobs[i])
 			amdgpu_job_free(parser->jobs[i]);
 	}
-	if (parser->uf_entry.tv.bo) {
-		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
-
-		amdgpu_bo_unref(&uf);
-	}
+	amdgpu_bo_unref(&parser->uf_bo);
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -1444,7 +1446,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	return 0;
 
 error_backoff:
-	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
 	mutex_unlock(&parser.bo_list->bo_list_mutex);
 
 error_fini:
@@ -1624,15 +1625,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
 			continue;
 
 		r = dma_fence_wait_timeout(fence, true, timeout);
+		if (r > 0 && fence->error)
+			r = fence->error;
+
 		dma_fence_put(fence);
 		if (r < 0)
 			return r;
 
 		if (r == 0)
 			break;
-
-		if (fence->error)
-			return fence->error;
 	}
 
 	memset(wait, 0, sizeof(*wait));
@@ -1779,7 +1780,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 
 	/* Double check that the BO is reserved by this CS */
-	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
 		return -EINVAL;
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
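
The submit hunk above replaces the implicit ttm_eu_fence_buffer_objects() bookkeeping with explicit dma_resv usage: each gang member's scheduler fence is attached as a reader, and the gang leader's finished fence as the writer. A sketch of that bookkeeping, not the driver's code; the fence slots were reserved earlier via drm_exec_prepare_obj():

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Illustrative helper mirroring the per-BO fence handling in the hunk. */
static void attach_gang_fences(struct dma_resv *resv,
			       struct dma_fence **members,
			       unsigned int gang_size,
			       struct dma_fence *leader)
{
	unsigned int i;

	for (i = 0; i < gang_size; ++i)
		dma_resv_add_fence(resv, members[i], DMA_RESV_USAGE_READ);

	/* The leader's finished fence acts as the implicit-sync writer */
	dma_resv_add_fence(resv, leader, DMA_RESV_USAGE_WRITE);
}
```

The amdgpu_cs_wait_all_fences() hunk also fixes an ordering problem: the old code read fence->error after dma_fence_put() had already dropped the reference, and it returned the error even after a plain timeout. The new order samples fence->error only when the wait succeeded (r > 0), before the reference is dropped.
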
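Condensed into a standalone helper for clarity; wait_fence_checked() is illustrative, not part of the patch:

```c
#include <linux/dma-fence.h>

/* Illustrative helper matching the reordered logic in the hunk above. */
static long wait_fence_checked(struct dma_fence *fence, long timeout)
{
	long r = dma_fence_wait_timeout(fence, true, timeout);

	if (r > 0 && fence->error)
		r = fence->error;	/* propagate the fence's error code */

	dma_fence_put(fence);
	return r;	/* < 0 error, 0 timeout, > 0 remaining jiffies */
}
```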