Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 38
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0e7dc23f78e7..418341a67517 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,7 +53,7 @@
  * can be mapped as snooped (cached system pages) or unsnooped
  * (uncached system pages).
  * Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When execting a command buffer,
+ * associated with each VMID. When executing a command buffer,
  * the kernel tells the the ring what VMID to use for that command
  * buffer. VMIDs are allocated dynamically as commands are submitted.
  * The userspace drivers maintain their own address space and the kernel
@@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * Check if all VM PDs/PTs are ready for updates
  *
  * Returns:
- * True if eviction list is empty.
+ * True if VM is not evicting.
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-	return list_empty(&vm->evicted);
+	bool ret;
+
+	amdgpu_vm_eviction_lock(vm);
+	ret = !vm->evicting;
+	amdgpu_vm_eviction_unlock(vm);
+
+	return ret && list_empty(&vm->evicted);
 }
 
 /**
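The amdgpu_vm_ready() hunk above strengthens the check: vm->evicting can be set (it is read under the eviction lock) while the evicted list is still empty, so the old list_empty() test alone could report the VM ready too early. Callers use this helper to gate page-table updates. A minimal caller sketch follows; only amdgpu_vm_ready() and the struct types come from the driver, the function name and body are illustrative:

static void example_update_mappings(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	/* Bail out while the VM is mid-eviction or still has evicted
	 * page tables; the update can be retried once the VM has been
	 * validated again.
	 */
	if (!amdgpu_vm_ready(vm))
		return;

	/* ... perform the page table / mapping updates here ... */
}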
@@ -2102,30 +2108,14 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
-	struct dma_fence *excl, **shared;
-	unsigned i, shared_count;
-	int r;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
 
-	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
-	if (r) {
-		/* Not enough memory to grab the fence list, as last resort
-		 * block for all the fences to complete.
-		 */
-		dma_resv_wait_timeout(resv, true, false,
-				      MAX_SCHEDULE_TIMEOUT);
-		return;
-	}
-
-	/* Add a callback for each fence in the reservation object */
-	amdgpu_vm_prt_get(adev);
-	amdgpu_vm_add_prt_cb(adev, excl);
-
-	for (i = 0; i < shared_count; ++i) {
+	dma_resv_for_each_fence(&cursor, resv, true, fence) {
+		/* Add a callback for each fence in the reservation object */
 		amdgpu_vm_prt_get(adev);
-		amdgpu_vm_add_prt_cb(adev, shared[i]);
+		amdgpu_vm_add_prt_cb(adev, fence);
 	}
-
-	kfree(shared);
 }
 
 /**
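The amdgpu_vm_prt_fini() hunk converts the function to the dma_resv_iter cursor: dma_resv_get_fences() had to allocate a snapshot array of fences, which is why the allocation-failure fallback to a blocking dma_resv_wait_timeout() existed, whereas the iterator walks the fences in place without allocating. A minimal sketch of the iteration pattern, assuming the signature used in this kernel generation (the third argument is a bool selecting shared fences in addition to the exclusive one); the walker function itself is hypothetical:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical walker: visit every fence on a reservation object.
 * dma_resv_for_each_fence() expects the reservation lock to be held,
 * so the fences cannot vanish mid-walk; take an explicit
 * dma_fence_get() only if a fence must be kept after the lock is
 * dropped.
 */
static void example_walk_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* true = include shared fences, as amdgpu_vm_prt_fini() does */
	dma_resv_for_each_fence(&cursor, resv, true, fence) {
		/* ... inspect or hand off 'fence' here ... */
	}
}

Note that the loop body in the patch is unchanged apart from the fence variable (one PRT reference and one callback per fence); only the way fences are enumerated differs, which is what lets the whole error path be deleted.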