Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
28 files changed, 539 insertions(+), 530 deletions(-)
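Most of this series converts amdgpu from the old exclusive/shared dma_resv fence slots to the usage-based fence API: dma_resv_reserve_shared() becomes dma_resv_reserve_fences(), fences are attached with dma_resv_add_fence() plus an explicit enum dma_resv_usage, and waits and iterations name the usage class they care about. A minimal sketch of the new pattern follows, assuming the dma-resv API as of this series; example_attach_and_wait() is a hypothetical helper, not code from the patch:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/sched.h>

static long example_attach_and_wait(struct dma_resv *resv,
				    struct dma_fence *fence)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;
	long r;

	r = dma_resv_lock(resv, NULL);
	if (r)
		return r;

	/* A slot must be reserved before a fence can be added. */
	r = dma_resv_reserve_fences(resv, 1);
	if (r)
		goto unlock;

	/* The usage flag replaces the old exclusive/shared split. */
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

	/* Walk only the fences kernel-internal operations must honor. */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_KERNEL, f)
		pr_debug("kernel fence from context %llu\n", f->context);

unlock:
	dma_resv_unlock(resv);
	if (r)
		return r;

	/* Block for everything, bookkeeping fences included. */
	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
				     false, MAX_SCHEDULE_TIMEOUT);
}

Each usage class includes the more important ones below it (KERNEL, WRITE, READ, BOOKKEEP), which is why waiting with DMA_RESV_USAGE_BOOKKEEP, as several conversions below do, covers every fence on the object.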
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 8b14c55a0793..80b6b8e432fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -253,53 +253,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, struct amdgpu_amdkfd_fence *ef) { - struct dma_resv *resv = bo->tbo.base.resv; - struct dma_resv_list *old, *new; - unsigned int i, j, k; + struct dma_fence *replacement; if (!ef) return -EINVAL; - old = dma_resv_shared_list(resv); - if (!old) - return 0; - - new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL); - if (!new) - return -ENOMEM; - - /* Go through all the shared fences in the resevation object and sort - * the interesting ones to the end of the list. + /* TODO: Instead of block before we should use the fence of the page + * table update and TLB flush here directly. */ - for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) { - struct dma_fence *f; - - f = rcu_dereference_protected(old->shared[i], - dma_resv_held(resv)); - - if (f->context == ef->base.context) - RCU_INIT_POINTER(new->shared[--j], f); - else - RCU_INIT_POINTER(new->shared[k++], f); - } - new->shared_max = old->shared_max; - new->shared_count = k; - - /* Install the new fence list, seqcount provides the barriers */ - write_seqcount_begin(&resv->seq); - RCU_INIT_POINTER(resv->fence, new); - write_seqcount_end(&resv->seq); - - /* Drop the references to the removed fences or move them to ef_list */ - for (i = j; i < old->shared_count; ++i) { - struct dma_fence *f; - - f = rcu_dereference_protected(new->shared[i], - dma_resv_held(resv)); - dma_fence_put(f); - } - kfree_rcu(old, rcu); - + replacement = dma_fence_get_stub(); + dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, + replacement, DMA_RESV_USAGE_READ); + dma_fence_put(replacement); return 0; } @@ -1266,7 +1231,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; - ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); + ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; amdgpu_bo_fence(vm->root.bo, @@ -2480,6 +2445,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; struct kfd_mem_attachment *attachment; + struct dma_resv_iter cursor; + struct dma_fence *fence; total_size += amdgpu_bo_size(bo); @@ -2494,10 +2461,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) goto validate_map_fail; } } - ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving); - if (ret) { - pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); - goto validate_map_fail; + dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_KERNEL, fence) { + ret = amdgpu_sync_fence(&sync_obj, fence); + if (ret) { + pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); + goto validate_map_fail; + } } list_for_each_entry(attachment, &mem->attachments, list) { if (!attachment->is_mapped) @@ -2604,7 +2574,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem * Add process eviction fence to bo so they can * evict each other. 
*/ - ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1); + ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 044b41f0bfd9..529d52a204cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -34,7 +34,6 @@ struct amdgpu_fpriv; struct amdgpu_bo_list_entry { struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; - struct dma_fence_chain *chain; uint32_t priority; struct page **user_pages; bool user_invalidated; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 673078faa27a..b7933c2ce765 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -24,9 +24,9 @@ * Alex Deucher */ +#include <drm/display/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fb_helper.h> -#include <drm/dp/drm_dp_helper.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 67bd506fa141..2982b543c27f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &bo->tbo; - /* One for TTM and one for the CS job */ - p->uf_entry.tv.num_shared = 2; + /* One for TTM and two for the CS job */ + p->uf_entry.tv.num_shared = 3; drm_gem_object_put(gobj); @@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs goto free_chunk; } + mutex_lock(&p->ctx->lock); + /* skip guilty context job */ if (atomic_read(&p->ctx->guilty) == 1) { ret = -ECANCELED; @@ -574,14 +576,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); e->bo_va = amdgpu_vm_bo_find(vm, bo); - - if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) { - e->chain = dma_fence_chain_alloc(); - if (!e->chain) { - r = -ENOMEM; - goto error_validate; - } - } } /* Move fence waiting after getting reservation lock of @@ -642,13 +636,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) { - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - dma_fence_chain_free(e->chain); - e->chain = NULL; - } + if (r) ttm_eu_backoff_reservation(&p->ticket, &p->validated); - } out_free_user_pages: if (r) { @@ -700,17 +689,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) { - struct amdgpu_bo_list_entry *e; - - amdgpu_bo_list_for_each_entry(e, parser->bo_list) { - dma_fence_chain_free(e->chain); - e->chain = NULL; - } - + if (error && backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -721,6 +702,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, dma_fence_put(parser->fence); if (parser->ctx) { + mutex_unlock(&parser->ctx->lock); amdgpu_ctx_put(parser->ctx); } if (parser->bo_list) @@ -1169,6 +1151,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, { int i, r; + /* TODO: 
Investigate why we still need the context lock */ + mutex_unlock(&p->ctx->lock); + for (i = 0; i < p->nchunks; ++i) { struct amdgpu_cs_chunk *chunk; @@ -1179,32 +1164,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: r = amdgpu_cs_process_fence_dep(p, chunk); if (r) - return r; + goto out; break; case AMDGPU_CHUNK_ID_SYNCOBJ_IN: r = amdgpu_cs_process_syncobj_in_dep(p, chunk); if (r) - return r; + goto out; break; case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: r = amdgpu_cs_process_syncobj_out_dep(p, chunk); if (r) - return r; + goto out; break; case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); if (r) - return r; + goto out; break; case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); if (r) - return r; + goto out; break; } } - return 0; +out: + mutex_lock(&p->ctx->lock); + return r; } static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) @@ -1284,24 +1271,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct dma_resv *resv = e->tv.bo->base.resv; - struct dma_fence_chain *chain = e->chain; - - if (!chain) - continue; - - /* - * Work around dma_resv shortcomings by wrapping up the - * submission in a dma_fence_chain and add it as exclusive - * fence. - */ - dma_fence_chain_init(chain, dma_resv_excl_fence(resv), - dma_fence_get(p->fence), 1); - - rcu_assign_pointer(resv->fence_excl, &chain->base); - e->chain = NULL; - } + /* Make sure all BOs are remembered as writers */ + amdgpu_bo_list_for_each_entry(e, p->bo_list) + e->tv.num_shared = 0; ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); @@ -1380,6 +1352,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) goto out; r = amdgpu_cs_submit(&parser, cs); + out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 5981c7d9bd48..8f0e6d93bb9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, kref_init(&ctx->refcount); spin_lock_init(&ctx->ring_lock); + mutex_init(&ctx->lock); ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); ctx->reset_counter_query = ctx->reset_counter; @@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref) drm_dev_exit(idx); } + mutex_destroy(&ctx->lock); kfree(ctx); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index d0cbfcea90f7..142f2f87d44c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -49,6 +49,7 @@ struct amdgpu_ctx { bool preamble_presented; int32_t init_priority; int32_t override_priority; + struct mutex lock; atomic_t guilty; unsigned long ras_counter_ce; unsigned long ras_counter_ue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 787724166952..17c9bbe0cbc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -206,8 +206,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - /* TODO: Unify this with other drivers */ - r = dma_resv_get_fences(new_abo->tbo.base.resv, 
true, + r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE, &work->shared_count, &work->shared); if (unlikely(r != 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 579adfafe4d0..782cbca37538 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach) { struct drm_gem_object *obj = attach->dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int r; /* pin buffer into GTT */ - r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); - if (r) - return r; - - if (bo->tbo.moving) { - r = dma_fence_wait(bo->tbo.moving, true); - if (r) { - amdgpu_bo_unpin(bo); - return r; - } - } - return 0; + return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 89c6d6f1d4fa..652571267077 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, + true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index dd78402e3cb0..8c6b2284cf56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -26,23 +26,12 @@ #include "amdgpu.h" -struct amdgpu_gtt_node { - struct ttm_buffer_object *tbo; - struct ttm_range_mgr_node base; -}; - static inline struct amdgpu_gtt_mgr * to_gtt_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_gtt_mgr, manager); } -static inline struct amdgpu_gtt_node * -to_amdgpu_gtt_node(struct ttm_resource *res) -{ - return container_of(res, struct amdgpu_gtt_node, base.base); -} - /** * DOC: mem_info_gtt_total * @@ -106,9 +95,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = { */ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res) { - struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res); + struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); - return drm_mm_node_allocated(&node->base.mm_nodes[0]); + return drm_mm_node_allocated(&node->mm_nodes[0]); } /** @@ -128,15 +117,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); uint32_t num_pages = PFN_UP(tbo->base.size); - struct amdgpu_gtt_node *node; + struct ttm_range_mgr_node *node; int r; - node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL); + node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL); if (!node) return -ENOMEM; - node->tbo = tbo; - ttm_resource_init(tbo, place, &node->base.base); + ttm_resource_init(tbo, place, &node->base); if (!(place->flags & TTM_PL_FLAG_TEMPORARY) && ttm_resource_manager_usage(man) > man->size) { r = -ENOSPC; @@ -145,8 +133,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, if (place->lpfn) { spin_lock(&mgr->lock); - r = drm_mm_insert_node_in_range(&mgr->mm, - &node->base.mm_nodes[0], + r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0], num_pages, tbo->page_alignment, 0, place->fpfn, place->lpfn, DRM_MM_INSERT_BEST); @@ -154,18 +141,18 @@ static int 
amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, if (unlikely(r)) goto err_free; - node->base.base.start = node->base.mm_nodes[0].start; + node->base.start = node->mm_nodes[0].start; } else { - node->base.mm_nodes[0].start = 0; - node->base.mm_nodes[0].size = node->base.base.num_pages; - node->base.base.start = AMDGPU_BO_INVALID_OFFSET; + node->mm_nodes[0].start = 0; + node->mm_nodes[0].size = node->base.num_pages; + node->base.start = AMDGPU_BO_INVALID_OFFSET; } - *res = &node->base.base; + *res = &node->base; return 0; err_free: - ttm_resource_fini(man, &node->base.base); + ttm_resource_fini(man, &node->base); kfree(node); return r; } @@ -181,12 +168,12 @@ err_free: static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, struct ttm_resource *res) { - struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res); + struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); spin_lock(&mgr->lock); - if (drm_mm_node_allocated(&node->base.mm_nodes[0])) - drm_mm_remove_node(&node->base.mm_nodes[0]); + if (drm_mm_node_allocated(&node->mm_nodes[0])) + drm_mm_remove_node(&node->mm_nodes[0]); spin_unlock(&mgr->lock); ttm_resource_fini(man, res); @@ -202,15 +189,15 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, */ void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr) { - struct amdgpu_gtt_node *node; + struct ttm_range_mgr_node *node; struct drm_mm_node *mm_node; struct amdgpu_device *adev; adev = container_of(mgr, typeof(*adev), mman.gtt_mgr); spin_lock(&mgr->lock); drm_mm_for_each_node(mm_node, &mgr->mm) { - node = container_of(mm_node, typeof(*node), base.mm_nodes[0]); - amdgpu_ttm_recover_gart(node->tbo); + node = container_of(mm_node, typeof(*node), mm_nodes[0]); + amdgpu_ttm_recover_gart(node->base.bo); } spin_unlock(&mgr->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 558f90e11d78..03d115d2b5ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -107,36 +107,19 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence, void amdgpu_pasid_free_delayed(struct dma_resv *resv, u32 pasid) { - struct dma_fence *fence, **fences; struct amdgpu_pasid_cb *cb; - unsigned count; + struct dma_fence *fence; int r; - r = dma_resv_get_fences(resv, true, &count, &fences); + r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence); if (r) goto fallback; - if (count == 0) { + if (!fence) { amdgpu_pasid_free(pasid); return; } - if (count == 1) { - fence = fences[0]; - kfree(fences); - } else { - uint64_t context = dma_fence_context_alloc(1); - struct dma_fence_array *array; - - array = dma_fence_array_create(count, fences, context, - 1, false); - if (!array) { - kfree(fences); - goto fallback; - } - fence = &array->base; - } - cb = kmalloc(sizeof(*cb), GFP_KERNEL); if (!cb) { /* Last resort when we are OOM */ @@ -156,7 +139,8 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 4b153daf283d..b86c0b8252a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index e8da738b309e..f80b4838cea1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -30,10 +30,10 @@ #ifndef AMDGPU_MODE_H #define AMDGPU_MODE_H +#include <drm/display/drm_dp_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> -#include <drm/dp/drm_dp_helper.h> #include <drm/drm_fixed.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> @@ -44,7 +44,7 @@ #include <linux/hrtimer.h> #include "amdgpu_irq.h" -#include <drm/dp/drm_dp_mst_helper.h> +#include <drm/display/drm_dp_mst_helper.h> #include "modules/inc/mod_freesync.h" #include "amdgpu_dm_irq_params.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a00022b6ee5b..5444515c1476 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (unlikely(r)) goto fail_unreserve; - amdgpu_bo_fence(bo, fence, false); - dma_fence_put(bo->tbo.moving); - bo->tbo.moving = dma_fence_get(fence); + dma_resv_add_fence(bo->tbo.base.resv, fence, + DMA_RESV_USAGE_KERNEL); dma_fence_put(fence); } if (!bp->resv) @@ -761,6 +760,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL, + false, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + return r; + kptr = amdgpu_bo_kptr(bo); if (kptr) { if (ptr) @@ -768,11 +772,6 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); - if (r < 0) - return r; - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); if (r) return r; @@ -1390,11 +1389,17 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared) { struct dma_resv *resv = bo->tbo.base.resv; + int r; - if (shared) - dma_resv_add_shared_fence(resv, fence); - else - dma_resv_add_excl_fence(resv, fence); + r = dma_resv_reserve_fences(resv, 1); + if (r) { + /* As last resort on OOM we block for the fence */ + dma_fence_wait(fence, false); + return; + } + + dma_resv_add_fence(resv, fence, shared ? 
DMA_RESV_USAGE_READ : + DMA_RESV_USAGE_WRITE); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index acfa207cf970..6546552e596c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -30,12 +30,15 @@ #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_range_manager.h> +#include "amdgpu_vram_mgr.h" + /* state back for walking over vram_mgr and gtt_mgr allocations */ struct amdgpu_res_cursor { uint64_t start; uint64_t size; uint64_t remaining; - struct drm_mm_node *node; + void *node; + uint32_t mem_type; }; /** @@ -52,27 +55,63 @@ static inline void amdgpu_res_first(struct ttm_resource *res, uint64_t start, uint64_t size, struct amdgpu_res_cursor *cur) { + struct drm_buddy_block *block; + struct list_head *head, *next; struct drm_mm_node *node; - if (!res || res->mem_type == TTM_PL_SYSTEM) { - cur->start = start; - cur->size = size; - cur->remaining = size; - cur->node = NULL; - WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); - return; - } + if (!res) + goto fallback; BUG_ON(start + size > res->num_pages << PAGE_SHIFT); - node = to_ttm_range_mgr_node(res)->mm_nodes; - while (start >= node->size << PAGE_SHIFT) - start -= node++->size << PAGE_SHIFT; + cur->mem_type = res->mem_type; + + switch (cur->mem_type) { + case TTM_PL_VRAM: + head = &to_amdgpu_vram_mgr_resource(res)->blocks; + + block = list_first_entry_or_null(head, + struct drm_buddy_block, + link); + if (!block) + goto fallback; + + while (start >= amdgpu_vram_mgr_block_size(block)) { + start -= amdgpu_vram_mgr_block_size(block); + + next = block->link.next; + if (next != head) + block = list_entry(next, struct drm_buddy_block, link); + } + + cur->start = amdgpu_vram_mgr_block_start(block) + start; + cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size); + cur->remaining = size; + cur->node = block; + break; + case TTM_PL_TT: + node = to_ttm_range_mgr_node(res)->mm_nodes; + while (start >= node->size << PAGE_SHIFT) + start -= node++->size << PAGE_SHIFT; + + cur->start = (node->start << PAGE_SHIFT) + start; + cur->size = min((node->size << PAGE_SHIFT) - start, size); + cur->remaining = size; + cur->node = node; + break; + default: + goto fallback; + } - cur->start = (node->start << PAGE_SHIFT) + start; - cur->size = min((node->size << PAGE_SHIFT) - start, size); + return; + +fallback: + cur->start = start; + cur->size = size; cur->remaining = size; - cur->node = node; + cur->node = NULL; + WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); + return; } /** @@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res, */ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) { - struct drm_mm_node *node = cur->node; + struct drm_buddy_block *block; + struct drm_mm_node *node; + struct list_head *next; BUG_ON(size > cur->remaining); @@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) return; } - cur->node = ++node; - cur->start = node->start << PAGE_SHIFT; - cur->size = min(node->size << PAGE_SHIFT, cur->remaining); + switch (cur->mem_type) { + case TTM_PL_VRAM: + block = cur->node; + + next = block->link.next; + block = list_entry(next, struct drm_buddy_block, link); + + cur->node = block; + cur->start = amdgpu_vram_mgr_block_start(block); + cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining); + break; + case TTM_PL_TT: + node = cur->node; + + cur->node = ++node; + 
cur->start = node->start << PAGE_SHIFT; + cur->size = min(node->size << PAGE_SHIFT, cur->remaining); + break; + default: + return; + } } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index edd49dd27422..504af1b93bfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -241,7 +241,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, if (resv == NULL) return -EINVAL; - dma_resv_for_each_fence(&cursor, resv, true, f) { + /* TODO: Use DMA_RESV_USAGE_READ here */ + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) { dma_fence_chain_for_each(f, f) { struct dma_fence *tmp = dma_fence_chain_contained(f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 4b9ee6e27f74..ec26edd4f4d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) { + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, + DMA_RESV_USAGE_BOOKKEEP, f) { if (amdkfd_fence_check_mm(f, current->mm)) return false; } @@ -1547,7 +1548,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = { .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, .access_memory = &amdgpu_ttm_access_memory, - .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify }; /* @@ -2161,17 +2161,6 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) #if defined(CONFIG_DEBUG_FS) -static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - TTM_PL_VRAM); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; @@ -2179,55 +2168,6 @@ static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) return ttm_pool_debugfs(&adev->mman.bdev.pool, m); } -static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - TTM_PL_TT); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_GDS); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_GWS); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device 
*)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_OA); - struct drm_printer p = drm_seq_file_printer(m); - - ttm_resource_manager_debug(man, &p); - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table); DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); /* @@ -2437,17 +2377,23 @@ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size); debugfs_create_file("amdgpu_iomem", 0444, root, adev, &amdgpu_ttm_iomem_fops); - debugfs_create_file("amdgpu_vram_mm", 0444, root, adev, - &amdgpu_mm_vram_table_fops); - debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev, - &amdgpu_mm_tt_table_fops); - debugfs_create_file("amdgpu_gds_mm", 0444, root, adev, - &amdgpu_mm_gds_table_fops); - debugfs_create_file("amdgpu_gws_mm", 0444, root, adev, - &amdgpu_mm_gws_table_fops); - debugfs_create_file("amdgpu_oa_mm", 0444, root, adev, - &amdgpu_mm_oa_table_fops); debugfs_create_file("ttm_page_pool", 0444, root, adev, &amdgpu_ttm_page_pool_fops); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + TTM_PL_VRAM), + root, "amdgpu_vram_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + TTM_PL_TT), + root, "amdgpu_gtt_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GDS), + root, "amdgpu_gds_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GWS), + root, "amdgpu_gws_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_OA), + root, "amdgpu_oa_mm"); + #endif } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 9120ae80ef52..6a70818039dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -26,6 +26,7 @@ #include <linux/dma-direction.h> #include <drm/gpu_scheduler.h> +#include "amdgpu_vram_mgr.h" #include "amdgpu.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) @@ -38,15 +39,6 @@ #define AMDGPU_POISON 0xd0bed0be -struct amdgpu_vram_mgr { - struct ttm_resource_manager manager; - struct drm_mm mm; - spinlock_t lock; - struct list_head reservations_pending; - struct list_head reserved_pages; - atomic64_t vis_usage; -}; - struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 39c74d9fa7cc..6eac649499d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + r = dma_resv_wait_timeout(bo->tbo.base.resv, + DMA_RESV_USAGE_KERNEL, false, msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 5224d9a39737..576849e95296 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -302,9 +302,6 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, struct drm_gem_object *obj; struct amdgpu_device *adev; struct amdgpu_bo *rbo; - struct list_head list; - struct ttm_validate_buffer tv; - struct 
ww_acquire_ctx ticket; uint32_t domain; int r; @@ -316,18 +313,19 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, obj = new_state->fb->obj[0]; rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); - INIT_LIST_HEAD(&list); - tv.bo = &rbo->tbo; - tv.num_shared = 1; - list_add(&tv.head, &list); - - r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); + r = amdgpu_bo_reserve(rbo, true); if (r) { dev_err(adev->dev, "fail to reserve bo (%d)\n", r); return r; } + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + dev_err(adev->dev, "allocating fence slot failed (%d)\n", r); + goto error_unlock; + } + if (plane->type != DRM_PLANE_TYPE_CURSOR) domain = amdgpu_display_supported_domains(adev, rbo->flags); else @@ -337,25 +335,29 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, if (unlikely(r != 0)) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to pin framebuffer with error %d\n", r); - ttm_eu_backoff_reservation(&ticket, &list); - return r; + goto error_unlock; } r = amdgpu_ttm_alloc_gart(&rbo->tbo); if (unlikely(r != 0)) { - amdgpu_bo_unpin(rbo); - ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("%p bind failed\n", rbo); - return r; + goto error_unpin; } - ttm_eu_backoff_reservation(&ticket, &list); + amdgpu_bo_unreserve(rbo); afb->address = amdgpu_bo_gpu_offset(rbo); amdgpu_bo_ref(rbo); return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; } static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4736174f5e4d..f9479e23de18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -289,7 +289,7 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, dma_resv_assert_held(vm->root.bo->tbo.base.resv); - vm->bulk_moveable = false; + ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); if (bo->tbo.type == ttm_bo_type_kernel && bo->parent) amdgpu_vm_bo_relocated(base); else @@ -330,36 +330,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, } /** - * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag - * - * @bo: BO which was removed from the LRU - * - * Make sure the bulk_moveable flag is updated when a BO is removed from the - * LRU. 
- */ -void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) -{ - struct amdgpu_bo *abo; - struct amdgpu_vm_bo_base *bo_base; - - if (!amdgpu_bo_is_amdgpu_bo(bo)) - return; - - if (bo->pin_count) - return; - - abo = ttm_to_amdgpu_bo(bo); - if (!abo->parent) - return; - for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { - struct amdgpu_vm *vm = bo_base->vm; - - if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) - vm->bulk_moveable = false; - } - -} -/** * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU * * @adev: amdgpu device pointer @@ -371,35 +341,9 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct amdgpu_vm_bo_base *bo_base; - - if (vm->bulk_moveable) { - spin_lock(&adev->mman.bdev.lru_lock); - ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); - spin_unlock(&adev->mman.bdev.lru_lock); - return; - } - - memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); - spin_lock(&adev->mman.bdev.lru_lock); - list_for_each_entry(bo_base, &vm->idle, vm_status) { - struct amdgpu_bo *bo = bo_base->bo; - struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo); - - if (!bo->parent) - continue; - - ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource, - &vm->lru_bulk_move); - if (shadow) - ttm_bo_move_to_lru_tail(&shadow->tbo, - shadow->tbo.resource, - &vm->lru_bulk_move); - } + ttm_lru_bulk_move_tail(&vm->lru_bulk_move); spin_unlock(&adev->mman.bdev.lru_lock); - - vm->bulk_moveable = true; } /** @@ -422,8 +366,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_bo_base *bo_base, *tmp; int r; - vm->bulk_moveable &= list_empty(&vm->evicted); - list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo); @@ -1244,7 +1186,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct dma_resv_iter cursor; struct dma_fence *fence; - dma_resv_for_each_fence(&cursor, resv, true, fence) { + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) { /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, fence); @@ -1796,7 +1738,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev, if (bo) { dma_resv_assert_held(bo->tbo.base.resv); if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) - vm->bulk_moveable = false; + ttm_bo_set_bulk_move(&bo->tbo, NULL); for (base = &bo_va->base.bo->vm_bo; *base; base = &(*base)->next) { @@ -1850,7 +1792,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) return false; /* Try to block ongoing updates */ @@ -2030,7 +1972,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, + DMA_RESV_USAGE_BOOKKEEP, true, timeout); if (timeout <= 0) return timeout; @@ -2112,7 +2055,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (r) goto error_free_root; - r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1); + r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); if (r) goto 
error_unreserve; @@ -2534,7 +2477,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, value = 0; } - r = dma_resv_reserve_shared(root->tbo.base.resv, 1); + r = dma_resv_reserve_fences(root->tbo.base.resv, 1); if (r) { pr_debug("failed %d to reserve fence slot\n", r); goto error_unlock; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6b06a214f05f..9ecb7f663e19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -321,8 +321,6 @@ struct amdgpu_vm { /* Store positions of group of BOs */ struct ttm_lru_bulk_move lru_bulk_move; - /* mark whether can do the bulk move */ - bool bulk_moveable; /* Flag to indicate if VM is used for compute */ bool is_compute_context; }; @@ -457,7 +455,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo); void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, uint64_t *gtt_mem, uint64_t *cpu_mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index e3fbf0f10add..31913ae86de6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, { unsigned int i; uint64_t value; - int r; + long r; - if (vmbo->bo.tbo.moving) { - r = dma_fence_wait(vmbo->bo.tbo.moving, true); - if (r) - return r; - } + r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL, + true, MAX_SCHEDULE_TIMEOUT); + if (r < 0) + return r; pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 958d7ed97882..88de9f0d4728 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -631,9 +631,13 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) if (!entry->bo) return; shadow = amdgpu_bo_shadowed(entry->bo); + if (shadow) { + ttm_bo_set_bulk_move(&shadow->tbo, NULL); + amdgpu_bo_unref(&shadow); + } + ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); entry->bo->vm_bo = NULL; list_del(&entry->vm_status); - amdgpu_bo_unref(&shadow); amdgpu_bo_unref(&entry->bo); } @@ -653,8 +657,6 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor cursor; struct amdgpu_vm_bo_base *entry; - vm->bulk_moveable = false; - for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) amdgpu_vm_pt_free(entry); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 69fba68ff88e..1fd3cbca20a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, struct amdgpu_bo *bo = &vmbo->bo; enum amdgpu_ib_pool_type pool = p->immediate ? 
AMDGPU_IB_POOL_IMMEDIATE : AMDGPU_IB_POOL_DELAYED; + struct dma_resv_iter cursor; unsigned int i, ndw, nptes; + struct dma_fence *fence; uint64_t *pte; int r; /* Wait for PD/PT moves to be completed */ - r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving); - if (r) - return r; + dma_resv_for_each_fence(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_KERNEL, fence) { + r = amdgpu_sync_fence(&p->job->sync, fence); + if (r) + return r; + } do { ndw = p->num_dw_left; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 0a7611648573..49e4092f447f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -32,8 +32,10 @@ #include "atom.h" struct amdgpu_vram_reservation { - struct list_head node; - struct drm_mm_node mm_node; + u64 start; + u64 size; + struct list_head allocated; + struct list_head blocks; }; static inline struct amdgpu_vram_mgr * @@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = { }; /** - * amdgpu_vram_mgr_vis_size - Calculate visible node size + * amdgpu_vram_mgr_vis_size - Calculate visible block size * * @adev: amdgpu_device pointer - * @node: MM node structure + * @block: DRM BUDDY block structure * - * Calculate how many bytes of the MM node are inside visible VRAM + * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM */ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, - struct drm_mm_node *node) + struct drm_buddy_block *block) { - uint64_t start = node->start << PAGE_SHIFT; - uint64_t end = (node->size + node->start) << PAGE_SHIFT; + u64 start = amdgpu_vram_mgr_block_start(block); + u64 end = start + amdgpu_vram_mgr_block_size(block); if (start >= adev->gmc.visible_vram_size) return 0; @@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res = bo->tbo.resource; - unsigned pages = res->num_pages; - struct drm_mm_node *mm; - u64 usage; + struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); + struct drm_buddy_block *block; + u64 usage = 0; if (amdgpu_gmc_vram_full_visible(&adev->gmc)) return amdgpu_bo_size(bo); @@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) return 0; - mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0]; - for (usage = 0; pages; pages -= mm->size, mm++) - usage += amdgpu_vram_mgr_vis_size(adev, mm); + list_for_each_entry(block, &vres->blocks, link) + usage += amdgpu_vram_mgr_vis_size(adev, block); return usage; } @@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_mm *mm = &mgr->mm; + struct drm_buddy *mm = &mgr->mm; struct amdgpu_vram_reservation *rsv, *temp; + struct drm_buddy_block *block; uint64_t vis_usage; - list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) { - if (drm_mm_reserve_node(mm, &rsv->mm_node)) + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { + if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, + rsv->size, mm->chunk_size, &rsv->allocated, + DRM_BUDDY_RANGE_ALLOCATION)) + continue; + + block = amdgpu_vram_mgr_first_block(&rsv->allocated); + if (!block) continue; dev_dbg(adev->dev, "Reservation 0x%llx - %lld, 
Succeeded\n", - rsv->mm_node.start, rsv->mm_node.size); + rsv->start, rsv->size); - vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node); + vis_usage = amdgpu_vram_mgr_vis_size(adev, block); atomic64_add(vis_usage, &mgr->vis_usage); spin_lock(&man->bdev->lru_lock); - man->usage += rsv->mm_node.size << PAGE_SHIFT; + man->usage += rsv->size; spin_unlock(&man->bdev->lru_lock); - list_move(&rsv->node, &mgr->reserved_pages); + list_move(&rsv->blocks, &mgr->reserved_pages); } } @@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, if (!rsv) return -ENOMEM; - INIT_LIST_HEAD(&rsv->node); - rsv->mm_node.start = start >> PAGE_SHIFT; - rsv->mm_node.size = size >> PAGE_SHIFT; + INIT_LIST_HEAD(&rsv->allocated); + INIT_LIST_HEAD(&rsv->blocks); - spin_lock(&mgr->lock); - list_add_tail(&rsv->node, &mgr->reservations_pending); + rsv->start = start; + rsv->size = size; + + mutex_lock(&mgr->lock); + list_add_tail(&rsv->blocks, &mgr->reservations_pending); amdgpu_vram_mgr_do_reserve(&mgr->manager); - spin_unlock(&mgr->lock); + mutex_unlock(&mgr->lock); return 0; } @@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, struct amdgpu_vram_reservation *rsv; int ret; - spin_lock(&mgr->lock); + mutex_lock(&mgr->lock); - list_for_each_entry(rsv, &mgr->reservations_pending, node) { - if ((rsv->mm_node.start <= start) && - (start < (rsv->mm_node.start + rsv->mm_node.size))) { + list_for_each_entry(rsv, &mgr->reservations_pending, blocks) { + if (rsv->start <= start && + (start < (rsv->start + rsv->size))) { ret = -EBUSY; goto out; } } - list_for_each_entry(rsv, &mgr->reserved_pages, node) { - if ((rsv->mm_node.start <= start) && - (start < (rsv->mm_node.start + rsv->mm_node.size))) { + list_for_each_entry(rsv, &mgr->reserved_pages, blocks) { + if (rsv->start <= start && + (start < (rsv->start + rsv->size))) { ret = 0; goto out; } @@ -327,33 +337,11 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, ret = -ENOENT; out: - spin_unlock(&mgr->lock); + mutex_unlock(&mgr->lock); return ret; } /** - * amdgpu_vram_mgr_virt_start - update virtual start address - * - * @mem: ttm_resource to update - * @node: just allocated node - * - * Calculate a virtual BO start address to easily check if everything is CPU - * accessible. 
- */ -static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, - struct drm_mm_node *node) -{ - unsigned long start; - - start = node->start + node->size; - if (start > mem->num_pages) - start -= mem->num_pages; - else - start = 0; - mem->start = max(mem->start, start); -} - -/** * amdgpu_vram_mgr_new - allocate new ranges * * @man: TTM memory type manager @@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_resource **res) { - unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages; + u64 vis_usage = 0, max_bytes, cur_size, min_block_size; struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - uint64_t vis_usage = 0, mem_bytes, max_bytes; - struct ttm_range_mgr_node *node; - struct drm_mm *mm = &mgr->mm; - enum drm_mm_insert_mode mode; - unsigned i; + struct amdgpu_vram_mgr_resource *vres; + u64 size, remaining_size, lpfn, fpfn; + struct drm_buddy *mm = &mgr->mm; + struct drm_buddy_block *block; + unsigned long pages_per_block; int r; - lpfn = place->lpfn; + lpfn = place->lpfn << PAGE_SHIFT; if (!lpfn) - lpfn = man->size >> PAGE_SHIFT; + lpfn = man->size; + + fpfn = place->fpfn << PAGE_SHIFT; max_bytes = adev->gmc.mc_vram_size; if (tbo->type != ttm_bo_type_kernel) max_bytes -= AMDGPU_VM_RESERVED_VRAM; - mem_bytes = tbo->base.size; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - pages_per_node = ~0ul; - num_nodes = 1; + pages_per_block = ~0ul; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - pages_per_node = HPAGE_PMD_NR; + pages_per_block = HPAGE_PMD_NR; #else /* default to 2MB */ - pages_per_node = 2UL << (20UL - PAGE_SHIFT); + pages_per_block = 2UL << (20UL - PAGE_SHIFT); #endif - pages_per_node = max_t(uint32_t, pages_per_node, - tbo->page_alignment); - num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node); + pages_per_block = max_t(uint32_t, pages_per_block, + tbo->page_alignment); } - node = kvmalloc(struct_size(node, mm_nodes, num_nodes), - GFP_KERNEL | __GFP_ZERO); - if (!node) + vres = kzalloc(sizeof(*vres), GFP_KERNEL); + if (!vres) return -ENOMEM; - ttm_resource_init(tbo, place, &node->base); + ttm_resource_init(tbo, place, &vres->base); /* bail out quickly if there's likely not enough VRAM for this BO */ if (ttm_resource_manager_usage(man) > max_bytes) { @@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, goto error_fini; } - mode = DRM_MM_INSERT_BEST; + INIT_LIST_HEAD(&vres->blocks); + if (place->flags & TTM_PL_FLAG_TOPDOWN) - mode = DRM_MM_INSERT_HIGH; - - pages_left = node->base.num_pages; - - /* Limit maximum size to 2GB due to SG table limitations */ - pages = min(pages_left, 2UL << (30 - PAGE_SHIFT)); - - i = 0; - spin_lock(&mgr->lock); - while (pages_left) { - uint32_t alignment = tbo->page_alignment; - - if (pages >= pages_per_node) - alignment = pages_per_node; - - r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages, - alignment, 0, place->fpfn, - lpfn, mode); - if (unlikely(r)) { - if (pages > pages_per_node) { - if (is_power_of_2(pages)) - pages = pages / 2; - else - pages = rounddown_pow_of_two(pages); - continue; + vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + + if (fpfn || lpfn != man->size) + /* Allocate blocks in desired range */ + vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; + + remaining_size = vres->base.num_pages << PAGE_SHIFT; + + mutex_lock(&mgr->lock); + while (remaining_size) { + if (tbo->page_alignment) + min_block_size = tbo->page_alignment << PAGE_SHIFT; + else + 
min_block_size = mgr->default_page_size; + + BUG_ON(min_block_size < mm->chunk_size); + + /* Limit maximum size to 2GiB due to SG table limitations */ + size = min(remaining_size, 2ULL << 30); + + if (size >= pages_per_block << PAGE_SHIFT) + min_block_size = pages_per_block << PAGE_SHIFT; + + cur_size = size; + + if (fpfn + size != place->lpfn << PAGE_SHIFT) { + /* + * Except for actual range allocation, modify the size and + * min_block_size conforming to continuous flag enablement + */ + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { + size = roundup_pow_of_two(size); + min_block_size = size; + /* + * Modify the size value if size is not + * aligned with min_block_size + */ + } else if (!IS_ALIGNED(size, min_block_size)) { + size = round_up(size, min_block_size); } - goto error_free; } - vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]); - amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]); - pages_left -= pages; - ++i; + r = drm_buddy_alloc_blocks(mm, fpfn, + lpfn, + size, + min_block_size, + &vres->blocks, + vres->flags); + if (unlikely(r)) + goto error_free_blocks; + + if (size > remaining_size) + remaining_size = 0; + else + remaining_size -= size; + } + mutex_unlock(&mgr->lock); + + if (cur_size != size) { + struct drm_buddy_block *block; + struct list_head *trim_list; + u64 original_size; + LIST_HEAD(temp); + + trim_list = &vres->blocks; + original_size = vres->base.num_pages << PAGE_SHIFT; + + /* + * If size value is rounded up to min_block_size, trim the last + * block to the required size + */ + if (!list_is_singular(&vres->blocks)) { + block = list_last_entry(&vres->blocks, typeof(*block), link); + list_move_tail(&block->link, &temp); + trim_list = &temp; + /* + * Compute the original_size value by subtracting the + * last block size with (aligned size - original size) + */ + original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size); + } - if (pages > pages_left) - pages = pages_left; + mutex_lock(&mgr->lock); + drm_buddy_block_trim(mm, + original_size, + trim_list); + mutex_unlock(&mgr->lock); + + if (!list_empty(&temp)) + list_splice_tail(trim_list, &vres->blocks); + } + + list_for_each_entry(block, &vres->blocks, link) + vis_usage += amdgpu_vram_mgr_vis_size(adev, block); + + block = amdgpu_vram_mgr_first_block(&vres->blocks); + if (!block) { + r = -EINVAL; + goto error_fini; } - spin_unlock(&mgr->lock); - if (i == 1) - node->base.placement |= TTM_PL_FLAG_CONTIGUOUS; + vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT; + + if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks)) + vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS; if (adev->gmc.xgmi.connected_to_cpu) - node->base.bus.caching = ttm_cached; + vres->base.bus.caching = ttm_cached; else - node->base.bus.caching = ttm_write_combined; + vres->base.bus.caching = ttm_write_combined; atomic64_add(vis_usage, &mgr->vis_usage); - *res = &node->base; + *res = &vres->base; return 0; -error_free: - while (i--) - drm_mm_remove_node(&node->mm_nodes[i]); - spin_unlock(&mgr->lock); +error_free_blocks: + drm_buddy_free_list(mm, &vres->blocks); + mutex_unlock(&mgr->lock); error_fini: - ttm_resource_fini(man, &node->base); - kvfree(node); + ttm_resource_fini(man, &vres->base); + kfree(vres); return r; } @@ -490,27 +540,26 @@ error_fini: static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct ttm_resource *res) { - struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); + struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); struct 
amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); + struct drm_buddy *mm = &mgr->mm; + struct drm_buddy_block *block; uint64_t vis_usage = 0; - unsigned i, pages; - spin_lock(&mgr->lock); - for (i = 0, pages = res->num_pages; pages; - pages -= node->mm_nodes[i].size, ++i) { - struct drm_mm_node *mm = &node->mm_nodes[i]; + mutex_lock(&mgr->lock); + list_for_each_entry(block, &vres->blocks, link) + vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - drm_mm_remove_node(mm); - vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); - } amdgpu_vram_mgr_do_reserve(man); - spin_unlock(&mgr->lock); + + drm_buddy_free_list(mm, &vres->blocks); + mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); ttm_resource_fini(man, res); - kvfree(node); + kfree(vres); } /** @@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, if (!*sgt) return -ENOMEM; - /* Determine the number of DRM_MM nodes to export */ + /* Determine the number of DRM_BUDDY blocks to export */ amdgpu_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, sg->length = 0; /* - * Walk down DRM_MM nodes to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_MM node + * Walk down DRM_BUDDY blocks to populate scatterlist nodes + * @note: Use iterator api to get first the DRM_BUDDY block * and the number of bytes from it. Access the following - * DRM_MM node(s) if more buffer needs to exported + * DRM_BUDDY block(s) if more buffer needs to exported */ amdgpu_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { @@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + struct drm_buddy *mm = &mgr->mm; + struct drm_buddy_block *block; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); - spin_lock(&mgr->lock); - drm_mm_print(&mgr->mm, printer); - spin_unlock(&mgr->lock); + mutex_lock(&mgr->lock); + drm_printf(printer, "default_page_size: %lluKiB\n", + mgr->default_page_size >> 10); + + drm_buddy_print(mm, printer); + + drm_printf(printer, "reserved:\n"); + list_for_each_entry(block, &mgr->reserved_pages, link) + drm_buddy_block_print(mm, block, printer); + mutex_unlock(&mgr->lock); } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { @@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) { struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; struct ttm_resource_manager *man = &mgr->manager; + int err; ttm_resource_manager_init(man, &adev->mman.bdev, adev->gmc.real_vram_size); man->func = &amdgpu_vram_mgr_func; - drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT); - spin_lock_init(&mgr->lock); + err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + if (err) + return err; + + mutex_init(&mgr->lock); INIT_LIST_HEAD(&mgr->reservations_pending); INIT_LIST_HEAD(&mgr->reserved_pages); + mgr->default_page_size = PAGE_SIZE; ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); ttm_resource_manager_set_used(man, true); @@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) if (ret) return; - spin_lock(&mgr->lock); - list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) + mutex_lock(&mgr->lock); + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) kfree(rsv); - 
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { - drm_mm_remove_node(&rsv->mm_node); + list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { + drm_buddy_free_list(&mgr->mm, &rsv->blocks); kfree(rsv); } - drm_mm_takedown(&mgr->mm); - spin_unlock(&mgr->lock); + drm_buddy_fini(&mgr->mm); + mutex_unlock(&mgr->lock); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h new file mode 100644 index 000000000000..9a2db87186c7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: MIT + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_VRAM_MGR_H__ +#define __AMDGPU_VRAM_MGR_H__ + +#include <drm/drm_buddy.h> + +struct amdgpu_vram_mgr { + struct ttm_resource_manager manager; + struct drm_buddy mm; + /* protects access to buffer objects */ + struct mutex lock; + struct list_head reservations_pending; + struct list_head reserved_pages; + atomic64_t vis_usage; + u64 default_page_size; +}; + +struct amdgpu_vram_mgr_resource { + struct ttm_resource base; + struct list_head blocks; + unsigned long flags; +}; + +static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) +{ + return drm_buddy_block_offset(block); +} + +static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) +{ + return PAGE_SIZE << drm_buddy_block_order(block); +} + +static inline struct drm_buddy_block * +amdgpu_vram_mgr_first_block(struct list_head *list) +{ + return list_first_entry_or_null(list, struct drm_buddy_block, link); +} + +static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) +{ + struct drm_buddy_block *block; + u64 start, size; + + block = amdgpu_vram_mgr_first_block(head); + if (!block) + return false; + + while (head != block->link.next) { + start = amdgpu_vram_mgr_block_start(block); + size = amdgpu_vram_mgr_block_size(block); + + block = list_entry(block->link.next, struct drm_buddy_block, link); + if (start + size != amdgpu_vram_mgr_block_start(block)) + return false; + } + + return true; +} + +static inline struct amdgpu_vram_mgr_resource * +to_amdgpu_vram_mgr_resource(struct ttm_resource *res) +{ + return container_of(res, struct amdgpu_vram_mgr_resource, base); +} + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 49a2f594fb2c..87c41e0e9b7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -26,6 +26,8 @@ */ #include <drm/amdgpu_drm.h> +#include <drm/display/drm_dp_helper.h> + #include "amdgpu.h" #include "atom.h" @@ -34,7 +36,6 @@ #include "atombios_dp.h" #include "amdgpu_connectors.h" #include "amdgpu_atombios.h" -#include <drm/dp/drm_dp_helper.h> /* move these to drm_dp_helper.c/h */ #define DP_LINK_CONFIGURATION_SIZE 9
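The VRAM manager rework above replaces the drm_mm range allocator with the drm_buddy allocator: an allocation becomes a list of power-of-two blocks hanging off amdgpu_vram_mgr_resource, and the manager's spinlock becomes a mutex, presumably because drm_buddy_alloc_blocks() can allocate memory internally and therefore sleep. A sketch of the basic drm_buddy lifecycle under the API as of this series; example_buddy_usage() and the sizes are illustrative only:

#include <drm/drm_buddy.h>
#include <linux/sizes.h>
#include <linux/printk.h>

static int example_buddy_usage(void)
{
	struct drm_buddy mm;
	struct drm_buddy_block *block;
	LIST_HEAD(blocks);
	int err;

	/* Manage a 1 GiB space with a 4 KiB minimum chunk size. */
	err = drm_buddy_init(&mm, SZ_1G, SZ_4K);
	if (err)
		return err;

	/* Ask for 8 MiB from [0, 1 GiB) in blocks of at least 2 MiB. */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_8M, SZ_2M,
				     &blocks, DRM_BUDDY_RANGE_ALLOCATION);
	if (err)
		goto out_fini;

	/* The result is a list of blocks, not one contiguous node. */
	list_for_each_entry(block, &blocks, link)
		pr_info("block: start %llu size %llu\n",
			drm_buddy_block_offset(block),
			drm_buddy_block_size(&mm, block));

	drm_buddy_free_list(&mm, &blocks);
out_fini:
	drm_buddy_fini(&mm);
	return err;
}

This is also why amdgpu_res_cursor above gained a mem_type field: the cursor now walks buddy-block lists for TTM_PL_VRAM while still walking drm_mm nodes for TTM_PL_TT, instead of assuming drm_mm nodes everywhere.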