Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c	88
1 file changed, 56 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 2a9a2593dc18..fcb711a11a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -165,6 +165,26 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 		atomic_read(&adev->gpu_reset_counter);
 }
 
+/* Check if we need to switch to another set of resources */
+static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
+					  struct amdgpu_job *job)
+{
+	return id->gds_base != job->gds_base ||
+		id->gds_size != job->gds_size ||
+		id->gws_base != job->gws_base ||
+		id->gws_size != job->gws_size ||
+		id->oa_base != job->oa_base ||
+		id->oa_size != job->oa_size;
+}
+
+/* Check if the id is compatible with the job */
+static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
+				   struct amdgpu_job *job)
+{
+	return id->pd_gpu_addr == job->vm_pd_addr &&
+		!amdgpu_vmid_gds_switch_needed(id, job);
+}
+
 /**
  * amdgpu_vmid_grab_idle - grab idle VMID
  *
@@ -258,14 +278,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	bool needs_flush = vm->use_cpu_for_update;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
 	int r;
 
-	*id = vm->reserved_vmid[vmhub];
+	*id = id_mgr->reserved;
 	if ((*id)->owner != vm->immediate.fence_context ||
-	    (*id)->pd_gpu_addr != job->vm_pd_addr ||
+	    !amdgpu_vmid_compatible(*id, job) ||
 	    (*id)->flushed_updates < updates ||
 	    !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
@@ -294,8 +315,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	if (r)
 		return r;
 
-	(*id)->flushed_updates = updates;
 	job->vm_needs_flush = needs_flush;
+	job->spm_update_needed = true;
 	return 0;
 }
 
@@ -333,7 +354,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		if ((*id)->owner != vm->immediate.fence_context)
 			continue;
 
-		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
+		if (!amdgpu_vmid_compatible(*id, job))
 			continue;
 
 		if (!(*id)->last_flush ||
@@ -355,7 +376,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		if (r)
 			return r;
 
-		(*id)->flushed_updates = updates;
 		job->vm_needs_flush |= needs_flush;
 		return 0;
 	}
@@ -408,22 +428,30 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
 		job->vm_needs_flush = true;
 	}
 
 		list_move_tail(&id->list, &id_mgr->ids_lru);
 	}
 
-	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->immediate.fence_context;
-
+	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
 	if (job->vm_needs_flush) {
+		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
 		dma_fence_put(id->last_flush);
 		id->last_flush = NULL;
 	}
 	job->vmid = id - id_mgr->ids;
 	job->pasid = vm->pasid;
+
+	id->gds_base = job->gds_base;
+	id->gds_size = job->gds_size;
+	id->gws_base = job->gws_base;
+	id->gws_size = job->gws_size;
+	id->oa_base = job->oa_base;
+	id->oa_size = job->oa_size;
+	id->pd_gpu_addr = job->vm_pd_addr;
+	id->owner = vm->immediate.fence_context;
+
 	trace_amdgpu_vm_grab_id(vm, ring, job);
 
 error:
@@ -435,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
-	struct amdgpu_vmid_mgr *id_mgr;
-	struct amdgpu_vmid *idle;
-	int r = 0;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-	id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	mutex_lock(&id_mgr->lock);
 	if (vm->reserved_vmid[vmhub])
 		goto unlock;
-	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-	    AMDGPU_VM_MAX_RESERVED_VMID) {
-		DRM_ERROR("Over limitation of reserved vmid\n");
-		atomic_dec(&id_mgr->reserved_vmid_num);
-		r = -EINVAL;
-		goto unlock;
+
+	++id_mgr->reserved_use_count;
+	if (!id_mgr->reserved) {
+		struct amdgpu_vmid *id;
+
+		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+				      list);
+		/* Remove from normal round robin handling */
+		list_del_init(&id->list);
+		id_mgr->reserved = id;
 	}
-	/* Select the first entry VMID */
-	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-	list_del_init(&idle->list);
-	vm->reserved_vmid[vmhub] = idle;
-	mutex_unlock(&id_mgr->lock);
+	vm->reserved_vmid[vmhub] = true;
 
-	return 0;
 unlock:
 	mutex_unlock(&id_mgr->lock);
-	return r;
+	return 0;
 }
 
@@ -469,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		list_add(&vm->reserved_vmid[vmhub]->list,
-			 &id_mgr->ids_lru);
-		vm->reserved_vmid[vmhub] = NULL;
-		atomic_dec(&id_mgr->reserved_vmid_num);
+	if (vm->reserved_vmid[vmhub] &&
+	    !--id_mgr->reserved_use_count) {
+		/* give the reserved ID back to normal round robin */
+		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
 	}
+	vm->reserved_vmid[vmhub] = false;
 	mutex_unlock(&id_mgr->lock);
 }
 
@@ -541,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
 		mutex_init(&id_mgr->lock);
 		INIT_LIST_HEAD(&id_mgr->ids_lru);
-		atomic_set(&id_mgr->reserved_vmid_num, 0);
+		id_mgr->reserved_use_count = 0;
 
 		/* manage only VMIDs not used by KFD */
 		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
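
Note: the first hunk adds two small predicates that the later hunks reuse to decide whether a VMID can be handed to a job as-is. The following is a minimal userspace sketch of the same logic, not the kernel code itself; vmid_state and job_state are trimmed-down stand-ins for struct amdgpu_vmid and struct amdgpu_job, which carry many more fields.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct vmid_state {            /* stands in for struct amdgpu_vmid */
		uint64_t pd_gpu_addr;
		uint32_t gds_base, gds_size;
		uint32_t gws_base, gws_size;
		uint32_t oa_base, oa_size;
	};

	struct job_state {             /* stands in for struct amdgpu_job */
		uint64_t vm_pd_addr;
		uint32_t gds_base, gds_size;
		uint32_t gws_base, gws_size;
		uint32_t oa_base, oa_size;
	};

	/* mirrors amdgpu_vmid_gds_switch_needed(): any mismatch in the
	 * GDS/GWS/OA windows forces a resource switch */
	static bool gds_switch_needed(const struct vmid_state *id,
				      const struct job_state *job)
	{
		return id->gds_base != job->gds_base ||
		       id->gds_size != job->gds_size ||
		       id->gws_base != job->gws_base ||
		       id->gws_size != job->gws_size ||
		       id->oa_base  != job->oa_base  ||
		       id->oa_size  != job->oa_size;
	}

	/* mirrors amdgpu_vmid_compatible(): same page directory address
	 * and no pending resource switch */
	static bool vmid_compatible(const struct vmid_state *id,
				    const struct job_state *job)
	{
		return id->pd_gpu_addr == job->vm_pd_addr &&
		       !gds_switch_needed(id, job);
	}

	int main(void)
	{
		struct vmid_state id  = { .pd_gpu_addr = 0x1000, .gds_size = 64 };
		struct job_state  job = { .vm_pd_addr  = 0x1000, .gds_size = 32 };

		/* same page directory, different GDS size: not compatible */
		printf("compatible: %d\n", vmid_compatible(&id, &job));
		return 0;
	}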
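Note: the amdgpu_vmid_grab() hunk also encodes an ordering constraint: job->gds_switch_needed must be computed from the ID's previous state before the ID caches the job's values, otherwise the comparison would trivially come out false. A condensed standalone model of just that ordering, with hypothetical stand-in types:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct id_model  { uint32_t gds_base, gds_size; };
	struct job_model { uint32_t gds_base, gds_size; bool gds_switch_needed; };

	static void grab_tail(struct id_model *id, struct job_model *job)
	{
		/* decide against the old state first ... */
		job->gds_switch_needed = id->gds_base != job->gds_base ||
					 id->gds_size != job->gds_size;

		/* ... then overwrite the ID with the job's configuration */
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
	}

	int main(void)
	{
		struct id_model  id  = { .gds_base = 0, .gds_size = 64 };
		struct job_model job = { .gds_base = 0, .gds_size = 32 };

		grab_tail(&id, &job);
		printf("switch needed: %d\n", job.gds_switch_needed); /* 1 */

		grab_tail(&id, &job);                /* same job again */
		printf("switch needed: %d\n", job.gds_switch_needed); /* 0 */
		return 0;
	}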
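Note: the alloc/free hunks replace the per-VM reserved pointer with a single manager-owned reserved ID plus a use count; each VM now only remembers a bool. Below is a standalone sketch of that bookkeeping under the assumption of an index-based LRU; all names are hypothetical models, not the driver's, and unlike the hunk above the model also clears the reserved slot on the last free for clarity.

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_IDS 8

	struct id_mgr_model {
		bool in_lru[NUM_IDS];     /* stands in for the ids_lru list */
		int  reserved;            /* index of reserved ID, -1 if none */
		unsigned reserved_use_count;
	};

	static void alloc_reserved(struct id_mgr_model *mgr, bool *vm_reserved)
	{
		if (*vm_reserved)         /* this VM already holds a reservation */
			return;

		if (++mgr->reserved_use_count == 1) {
			/* first user: pull one ID out of round-robin handling */
			for (int i = 0; i < NUM_IDS; i++) {
				if (mgr->in_lru[i]) {
					mgr->in_lru[i] = false;
					mgr->reserved = i;
					break;
				}
			}
		}
		*vm_reserved = true;
	}

	static void free_reserved(struct id_mgr_model *mgr, bool *vm_reserved)
	{
		/* decrement only for VMs that actually hold a reservation;
		 * the last one gives the ID back to the LRU */
		if (*vm_reserved && !--mgr->reserved_use_count) {
			mgr->in_lru[mgr->reserved] = true;
			mgr->reserved = -1;
		}
		*vm_reserved = false;
	}

	int main(void)
	{
		struct id_mgr_model mgr = { .reserved = -1 };
		bool vm_a = false, vm_b = false;

		for (int i = 0; i < NUM_IDS; i++)
			mgr.in_lru[i] = true;

		alloc_reserved(&mgr, &vm_a);  /* pulls ID 0 out of the LRU */
		alloc_reserved(&mgr, &vm_b);  /* shares the same reserved ID */
		free_reserved(&mgr, &vm_a);   /* count 2 -> 1, ID stays reserved */
		free_reserved(&mgr, &vm_b);   /* count 1 -> 0, ID back in LRU */

		printf("reserved=%d use_count=%u\n",
		       mgr.reserved, mgr.reserved_use_count);
		return 0;
	}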