author     Christian König <christian.koenig@amd.com>   2018-04-19 10:56:02 +0200
committer  Alex Deucher <alexander.deucher@amd.com>     2018-05-24 10:07:52 -0500
commit     af4c0f650b563c7b30c1d8cd2bb926247ceb19cc
tree       92238b98187bcf270b5b8b269bd93a16f67af695  /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent     22cc6c5e1958e5a08b4c44203d1810ab07ce5a16
drm/amdgpu: rework VM state machine lock handling v2
Only the moved state needs separate spinlock protection. All other
states are protected by reserving the VM anyway.
v2: fix some more incorrect cases
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
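To make the locking rule concrete: lists that are only touched while the VM's root buffer object is reserved need no extra lock, while vm->moved is also fed from contexts that do not hold the reservation (see amdgpu_vm_bo_invalidate() in the diff below) and therefore keeps its own spinlock. What follows is a minimal kernel-style sketch of that split, not code from the patch; the names vm_sketch and sketch_* are hypothetical stand-ins for the real structures in amdgpu_vm.h:

#include <linux/list.h>
#include <linux/spinlock.h>

struct vm_sketch {
	/* Protected by reserving the VM's root BO; only callers
	 * holding that reservation may touch these lists. */
	struct list_head evicted;
	struct list_head relocated;

	/* Also fed from contexts that do not hold the reservation,
	 * so this list keeps a dedicated spinlock. */
	spinlock_t moved_lock;
	struct list_head moved;
};

/* Mirrors the init order in amdgpu_vm_init() after the patch. */
static void sketch_init(struct vm_sketch *vm)
{
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	spin_lock_init(&vm->moved_lock);
	INIT_LIST_HEAD(&vm->moved);
}

/* Caller holds the VM reservation: plain list operation, no lock. */
static void sketch_mark_relocated(struct vm_sketch *vm,
				  struct list_head *entry)
{
	list_move(entry, &vm->relocated);
}

/* Caller may not hold the reservation: take moved_lock. */
static void sketch_mark_moved(struct vm_sketch *vm,
			      struct list_head *entry)
{
	spin_lock(&vm->moved_lock);
	list_move(entry, &vm->moved);
	spin_unlock(&vm->moved_lock);
}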
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 |
1 file changed, 20 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1a8f4e0dd023..f0deedcaf1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	 * is currently evicted. add the bo to the evicted list to make sure it
 	 * is validated on next vm use to avoid fault.
 	 * */
-	spin_lock(&vm->status_lock);
 	list_move_tail(&base->vm_status, &vm->evicted);
-	spin_unlock(&vm->status_lock);
 }
 
 /**
@@ -228,7 +226,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 	int r;
 
-	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->evicted)) {
 		struct amdgpu_vm_bo_base *bo_base;
 		struct amdgpu_bo *bo;
@@ -236,10 +233,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bo_base = list_first_entry(&vm->evicted,
 					   struct amdgpu_vm_bo_base,
 					   vm_status);
-		spin_unlock(&vm->status_lock);
 
 		bo = bo_base->bo;
-		BUG_ON(!bo);
 		if (bo->parent) {
 			r = validate(param, bo);
 			if (r)
@@ -259,13 +254,14 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				return r;
 		}
 
-		spin_lock(&vm->status_lock);
-		if (bo->tbo.type != ttm_bo_type_kernel)
+		if (bo->tbo.type != ttm_bo_type_kernel) {
+			spin_lock(&vm->moved_lock);
 			list_move(&bo_base->vm_status, &vm->moved);
-		else
+			spin_unlock(&vm->moved_lock);
+		} else {
 			list_move(&bo_base->vm_status, &vm->relocated);
+		}
 	}
-	spin_unlock(&vm->status_lock);
 
 	return 0;
 }
@@ -279,13 +275,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-	bool ready;
-
-	spin_lock(&vm->status_lock);
-	ready = list_empty(&vm->evicted);
-	spin_unlock(&vm->status_lock);
-
-	return ready;
+	return list_empty(&vm->evicted);
 }
 
 /**
@@ -477,9 +467,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			pt->parent = amdgpu_bo_ref(parent->base.bo);
 
 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-			spin_lock(&vm->status_lock);
 			list_move(&entry->base.vm_status, &vm->relocated);
-			spin_unlock(&vm->status_lock);
 		}
 
 		if (level < AMDGPU_VM_PTB) {
@@ -926,10 +914,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
 		if (!entry->base.bo)
 			continue;
 
-		spin_lock(&vm->status_lock);
 		if (list_empty(&entry->base.vm_status))
 			list_add(&entry->base.vm_status, &vm->relocated);
-		spin_unlock(&vm->status_lock);
 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
 	}
 }
@@ -974,7 +960,6 @@ restart:
 		params.func = amdgpu_vm_do_set_ptes;
 	}
 
-	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->relocated)) {
 		struct amdgpu_vm_bo_base *bo_base, *parent;
 		struct amdgpu_vm_pt *pt, *entry;
@@ -984,13 +969,10 @@ restart:
 				     struct amdgpu_vm_bo_base,
 				     vm_status);
 		list_del_init(&bo_base->vm_status);
-		spin_unlock(&vm->status_lock);
 
 		bo = bo_base->bo->parent;
-		if (!bo) {
-			spin_lock(&vm->status_lock);
+		if (!bo)
 			continue;
-		}
 
 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
 					  bo_list);
@@ -999,12 +981,10 @@ restart:
 
 		amdgpu_vm_update_pde(&params, vm, pt, entry);
 
-		spin_lock(&vm->status_lock);
 		if (!vm->use_cpu_for_update &&
 		    (ndw - params.ib->length_dw) < 32)
 			break;
 	}
-	spin_unlock(&vm->status_lock);
 
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
@@ -1107,9 +1087,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	if (entry->huge) {
 		/* Add the entry to the relocated list to update it. */
 		entry->huge = false;
-		spin_lock(&p->vm->status_lock);
 		list_move(&entry->base.vm_status, &p->vm->relocated);
-		spin_unlock(&p->vm->status_lock);
 	}
 	return;
 }
@@ -1588,8 +1566,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			amdgpu_asic_flush_hdp(adev, NULL);
 	}
 
-	spin_lock(&vm->status_lock);
+	spin_lock(&vm->moved_lock);
 	list_del_init(&bo_va->base.vm_status);
+	spin_unlock(&vm->moved_lock);
 
 	/* If the BO is not in its preferred location add it back to
 	 * the evicted list so that it gets validated again on the
@@ -1599,7 +1578,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	    !(bo->preferred_domains &
 	      amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
 		list_add_tail(&bo_va->base.vm_status, &vm->evicted);
-	spin_unlock(&vm->status_lock);
 
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	bo_va->cleared = clear;
@@ -1811,14 +1789,14 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	bool clear;
 	int r = 0;
 
-	spin_lock(&vm->status_lock);
+	spin_lock(&vm->moved_lock);
 	while (!list_empty(&vm->moved)) {
 		struct amdgpu_bo_va *bo_va;
 		struct reservation_object *resv;
 
 		bo_va = list_first_entry(&vm->moved,
 			struct amdgpu_bo_va, base.vm_status);
-		spin_unlock(&vm->status_lock);
+		spin_unlock(&vm->moved_lock);
 
 		resv = bo_va->base.bo->tbo.resv;
 
@@ -1839,9 +1817,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		if (!clear && resv != vm->root.base.bo->tbo.resv)
 			reservation_object_unlock(resv);
 
-		spin_lock(&vm->status_lock);
+		spin_lock(&vm->moved_lock);
 	}
-	spin_unlock(&vm->status_lock);
+	spin_unlock(&vm->moved_lock);
 
 	return r;
 }
@@ -1903,10 +1881,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 		amdgpu_vm_prt_get(adev);
 
 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-		spin_lock(&vm->status_lock);
+		spin_lock(&vm->moved_lock);
 		if (list_empty(&bo_va->base.vm_status))
 			list_add(&bo_va->base.vm_status, &vm->moved);
-		spin_unlock(&vm->status_lock);
+		spin_unlock(&vm->moved_lock);
 	}
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
@@ -2216,9 +2194,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
 	list_del(&bo_va->base.bo_list);
 
-	spin_lock(&vm->status_lock);
+	spin_lock(&vm->moved_lock);
 	list_del(&bo_va->base.vm_status);
-	spin_unlock(&vm->status_lock);
+	spin_unlock(&vm->moved_lock);
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
@@ -2261,28 +2239,24 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 		bo_base->moved = true;
 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-			spin_lock(&bo_base->vm->status_lock);
 			if (bo->tbo.type == ttm_bo_type_kernel)
 				list_move(&bo_base->vm_status, &vm->evicted);
 			else
 				list_move_tail(&bo_base->vm_status,
 					       &vm->evicted);
-			spin_unlock(&bo_base->vm->status_lock);
 			continue;
 		}
 
 		if (bo->tbo.type == ttm_bo_type_kernel) {
-			spin_lock(&bo_base->vm->status_lock);
 			if (list_empty(&bo_base->vm_status))
 				list_add(&bo_base->vm_status, &vm->relocated);
-			spin_unlock(&bo_base->vm->status_lock);
 			continue;
 		}
 
-		spin_lock(&bo_base->vm->status_lock);
+		spin_lock(&bo_base->vm->moved_lock);
 		if (list_empty(&bo_base->vm_status))
 			list_add(&bo_base->vm_status, &vm->moved);
-		spin_unlock(&bo_base->vm->status_lock);
+		spin_unlock(&bo_base->vm->moved_lock);
 	}
 }
 
@@ -2391,9 +2365,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->va = RB_ROOT_CACHED;
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		vm->reserved_vmid[i] = NULL;
-	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->evicted);
 	INIT_LIST_HEAD(&vm->relocated);
+	spin_lock_init(&vm->moved_lock);
 	INIT_LIST_HEAD(&vm->moved);
 	INIT_LIST_HEAD(&vm->freed);
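The drain loop in amdgpu_vm_handle_moved() above shows the pattern the dedicated lock enables: entries are popped from vm->moved under moved_lock, but the lock is dropped around each per-BO update because that work can block on reservations. Below is a simplified sketch of the same loop, reusing the hypothetical vm_sketch type from the sketch further up; note that the real code leaves the list removal to amdgpu_vm_bo_update(), which takes moved_lock itself:

/* Drain vm->moved, calling update() on each entry with the lock
 * dropped. update() is a hypothetical callback standing in for the
 * per-BO page-table update. */
static int sketch_handle_moved(struct vm_sketch *vm,
			       int (*update)(struct list_head *entry))
{
	int r = 0;

	spin_lock(&vm->moved_lock);
	while (!list_empty(&vm->moved)) {
		struct list_head *entry = vm->moved.next;

		/* Detach the entry before dropping the lock so the loop
		 * always makes progress. */
		list_del_init(entry);
		spin_unlock(&vm->moved_lock);

		/* update() may sleep; new BOs can be added to vm->moved
		 * concurrently while the lock is dropped. */
		r = update(entry);
		if (r)
			return r;

		spin_lock(&vm->moved_lock);
	}
	spin_unlock(&vm->moved_lock);

	return r;
}

Detaching under the lock and processing unlocked is what lets move notifications keep appending to vm->moved while the drain is in flight, without ever holding a spinlock across a sleeping operation.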