From 32601d48d783ac66e22fb506409a414c426784c0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 10 May 2017 20:06:58 +0200 Subject: drm/amdgpu: fix fundamental suspend/resume issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reinitializing the VM manager during suspend/resume is a very very bad idea since all the VMs are still active and kicking. This can lead to random VM faults after resume when new processes become the same client ID assigned. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 07ff3b1514f1..1bf36c3542c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vm_id *id = &id_mgr->ids[vmid]; + atomic64_set(&id->owner, 0); id->gds_base = 0; id->gds_size = 0; id->gws_base = 0; @@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, id->oa_size = 0; } +/** + * amdgpu_vm_reset_all_id - reset VMID to zero + * + * @adev: amdgpu device structure + * + * Reset VMID to force flush on next use + */ +void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev) +{ + unsigned i, j; + + for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { + struct amdgpu_vm_id_manager *id_mgr = + &adev->vm_manager.id_mgr[i]; + + for (j = 1; j < id_mgr->num_ids; ++j) + amdgpu_vm_reset_id(adev, i, j); + } +} + /** * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo * @@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) adev->vm_manager.seqno[i] = 0; - atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic64_set(&adev->vm_manager.client_counter, 0); spin_lock_init(&adev->vm_manager.prt_lock); -- cgit From ca7962d8ccc3da48073c419700c0813095c39f25 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 11 May 2017 18:22:17 +0800 Subject: drm/amdgpu: fix NULL pointer panic of emit_gds_switch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ 338.384770] BUG: unable to handle kernel NULL pointer dereference at (null) [ 338.384817] IP: [< (null)>] (null) [ 338.385505] RIP: 0010:[<0000000000000000>] [< (null)>] (null) [ 338.385950] Call Trace: [ 338.385993] [] ? amdgpu_vm_flush+0x283/0x400 [amdgpu] [ 338.386025] [] ? printk+0x4d/0x4f [ 338.386074] [] amdgpu_ib_schedule+0x4a6/0x4d0 [amdgpu] [ 338.386140] [] amdgpu_job_run+0x64/0x180 [amdgpu] [ 338.386203] [] amd_sched_main+0x2e9/0x4a0 [amdgpu] [ 338.386232] [] ? prepare_to_wait_event+0x110/0x110 [ 338.386295] [] ? amd_sched_select_entity+0xe0/0xe0 [amdgpu] [ 338.386327] [] kthread+0xd3/0xf0 [ 338.386349] [] ? kthread_park+0x60/0x60 [ 338.386376] [] ret_from_fork+0x25/0x30 [ 338.386401] Code: Bad RIP value. 
[ 338.386420] RIP [< (null)>] (null) [ 338.386443] RSP [ 338.386458] CR2: 0000000000000000 [ 338.398508] ---[ end trace 4c66fcdc74b9a0a2 ]--- Signed-off-by: Chunming Zhou Reviewed-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1bf36c3542c1..8ecf82c5fe74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) mutex_unlock(&id_mgr->lock); } - if (gds_switch_needed) { + if (ring->funcs->emit_gds_switch && gds_switch_needed) { id->gds_base = job->gds_base; id->gds_size = job->gds_size; id->gws_base = job->gws_base; -- cgit From cfbcacf42803a690be40068325d20d74b6093c8c Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 24 Apr 2017 11:09:04 +0800 Subject: drm/amdgpu: add vm ioctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It will be used for reserving vmid for shader debugging that requires a fixed vmid. v2: fix warning (Alex) Signed-off-by: Chunming Zhou Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 + include/uapi/drm/amdgpu_drm.h | 22 ++++++++++++++++++++++ 4 files changed, 40 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index dca4be970d13..1dbe76c3c366 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -948,6 +948,7 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe) const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), /* KMS */ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8ecf82c5fe74..e4e2bacdb230 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2322,3 +2322,19 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) } } } + +int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +{ + union drm_amdgpu_vm *args = data; + + switch (args->in.op) { + case AMDGPU_VM_OP_RESERVE_VMID: + case AMDGPU_VM_OP_UNRESERVE_VMID: + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index e1d951ece433..b10ce2d08685 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -239,5 +239,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t 
vm_size); +int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); #endif diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 6c249e5cfb09..56ceb3daaba5 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -51,6 +51,7 @@ extern "C" { #define DRM_AMDGPU_GEM_OP 0x10 #define DRM_AMDGPU_GEM_USERPTR 0x11 #define DRM_AMDGPU_WAIT_FENCES 0x12 +#define DRM_AMDGPU_VM 0x13 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) @@ -65,6 +66,7 @@ extern "C" { #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences) +#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm) #define AMDGPU_GEM_DOMAIN_CPU 0x1 #define AMDGPU_GEM_DOMAIN_GTT 0x2 @@ -190,6 +192,26 @@ union drm_amdgpu_ctx { union drm_amdgpu_ctx_out out; }; +/* vm ioctl */ +#define AMDGPU_VM_OP_RESERVE_VMID 1 +#define AMDGPU_VM_OP_UNRESERVE_VMID 2 + +struct drm_amdgpu_vm_in { + /** AMDGPU_VM_OP_* */ + __u32 op; + __u32 flags; +}; + +struct drm_amdgpu_vm_out { + /** For future use, no flags defined so far */ + __u64 flags; +}; + +union drm_amdgpu_vm { + struct drm_amdgpu_vm_in in; + struct drm_amdgpu_vm_out out; +}; + /* * This is not a reliable API and you should expect it to fail for any * number of reasons and have fallback path that do not use userptr to -- cgit From 36bbf3bf9b23a9fe66558e2aa235f7d81c4a0727 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 20 Apr 2017 16:17:34 +0800 Subject: drm/amdgpu: add reserved vmid field in vm struct v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: rename dedicated_vmid to reserved_vmid Signed-off-by: Chunming Zhou Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 ++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 ++ 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e4e2bacdb230..6eaeed08237c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2147,10 +2147,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) unsigned ring_instance; struct amdgpu_ring *ring; struct amd_sched_rq *rq; - int r; + int r, i; vm->va = RB_ROOT; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); + for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) + vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->cleared); @@ -2235,6 +2237,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; + int i; amd_sched_entity_fini(vm->entity.sched, &vm->entity); @@ -2258,6 +2261,18 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_levels(&vm->root); dma_fence_put(vm->last_dir_update); + for 
(i = 0; i < AMDGPU_MAX_VMHUBS; i++) { + struct amdgpu_vm_id_manager *id_mgr = + &adev->vm_manager.id_mgr[i]; + + mutex_lock(&id_mgr->lock); + if (vm->reserved_vmid[i]) { + list_add(&vm->reserved_vmid[i]->list, + &id_mgr->ids_lru); + vm->reserved_vmid[i] = NULL; + } + mutex_unlock(&id_mgr->lock); + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index b10ce2d08685..24ccf677f5a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -123,6 +123,8 @@ struct amdgpu_vm { /* client id */ u64 client_id; + /* dedicated to vm */ + struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; /* each VM will map on CSA */ struct amdgpu_bo_va *csa_bo_va; }; -- cgit From 1e9ef26fb385394c3b0267c8293f4178729d8719 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 20 Apr 2017 16:18:48 +0800 Subject: drm/amdgpu: reserve/unreserve vmid by vm ioctl v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add reserve/unreserve vmid funtions. Used to reserve vmids for certain shader debugging functionality that required a fixed vmid for the life of the debug. v3: only reserve vmid from gfxhub v4: fix racy condition Signed-off-by: Chunming Zhou Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 64 +++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6eaeed08237c..86ef0cdfd04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -540,6 +540,45 @@ error: return r; } +static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + unsigned vmhub) +{ + struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + + mutex_lock(&id_mgr->lock); + if (vm->reserved_vmid[vmhub]) { + list_add(&vm->reserved_vmid[vmhub]->list, + &id_mgr->ids_lru); + vm->reserved_vmid[vmhub] = NULL; + } + mutex_unlock(&id_mgr->lock); +} + +static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + unsigned vmhub) +{ + struct amdgpu_vm_id_manager *id_mgr; + struct amdgpu_vm_id *idle; + int r = 0; + + id_mgr = &adev->vm_manager.id_mgr[vmhub]; + mutex_lock(&id_mgr->lock); + if (vm->reserved_vmid[vmhub]) + goto unlock; + /* Select the first entry VMID */ + idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list); + list_del_init(&idle->list); + vm->reserved_vmid[vmhub] = idle; + mutex_unlock(&id_mgr->lock); + + return 0; +unlock: + mutex_unlock(&id_mgr->lock); + return r; +} + static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -2261,18 +2300,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_levels(&vm->root); dma_fence_put(vm->last_dir_update); - for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) { - struct amdgpu_vm_id_manager *id_mgr = - &adev->vm_manager.id_mgr[i]; - - mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[i]) { - list_add(&vm->reserved_vmid[i]->list, - &id_mgr->ids_lru); - vm->reserved_vmid[i] = NULL; - } - mutex_unlock(&id_mgr->lock); - } + for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) + amdgpu_vm_free_reserved_vmid(adev, vm, i); } /** @@ -2341,11 +2370,20 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) int 
amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_vm *args = data; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_fpriv *fpriv = filp->driver_priv; + int r; switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: + /* current, we only have requirement to reserve vmid from gfxhub */ + r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm, + AMDGPU_GFXHUB); + if (r) + return r; + break; case AMDGPU_VM_OP_UNRESERVE_VMID: - return -EINVAL; + amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB); break; default: return -EINVAL; -- cgit From c350577073de8fe21e6cdd798c4e4746d670bb47 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 21 Apr 2017 15:51:04 +0800 Subject: drm/amdgpu: add limitation for dedicated vm number v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Limit reserved vmids to 1 to avoid taking too many out of commission and starving the system. v2: move #define to amdgpu_vm.h v3: move reserved vmid counter to id_manager, and increase counter before allocating vmid v4: rename to reserved_vmid_num Signed-off-by: Chunming Zhou Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++ 2 files changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 86ef0cdfd04d..2ea91392290c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -551,6 +551,7 @@ static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev, list_add(&vm->reserved_vmid[vmhub]->list, &id_mgr->ids_lru); vm->reserved_vmid[vmhub] = NULL; + atomic_dec(&id_mgr->reserved_vmid_num); } mutex_unlock(&id_mgr->lock); } @@ -567,6 +568,13 @@ static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev, mutex_lock(&id_mgr->lock); if (vm->reserved_vmid[vmhub]) goto unlock; + if (atomic_inc_return(&id_mgr->reserved_vmid_num) > + AMDGPU_VM_MAX_RESERVED_VMID) { + DRM_ERROR("Over limitation of reserved vmid\n"); + atomic_dec(&id_mgr->reserved_vmid_num); + r = -EINVAL; + goto unlock; + } /* Select the first entry VMID */ idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list); list_del_init(&idle->list); @@ -2321,6 +2329,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) mutex_init(&id_mgr->lock); INIT_LIST_HEAD(&id_mgr->ids_lru); + atomic_set(&id_mgr->reserved_vmid_num, 0); /* skip over VMID 0, since it is the system VM */ for (j = 1; j < id_mgr->num_ids; ++j) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 24ccf677f5a5..fb25bb747730 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -84,6 +84,8 @@ struct amdgpu_bo_list_entry; /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (8 << 20) +/* max vmids dedicated for process */ +#define AMDGPU_VM_MAX_RESERVED_VMID 1 struct amdgpu_vm_pt { struct amdgpu_bo *bo; @@ -154,6 +156,7 @@ struct amdgpu_vm_id_manager { unsigned num_ids; struct list_head ids_lru; struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; + atomic_t reserved_vmid_num; }; struct amdgpu_vm_manager { -- cgit From 7a63eb23d8817477f19f2cf51fbf7b27b221049c Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 21 Apr 2017 11:13:56 +0800 Subject: drm/amdgpu: implement grab reserved vmid V4 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement the vmid reservation. v2: move sync waiting only when flush needs v3: fix racy v4: peek fence instead of get fence, and fix potential context starved. Signed-off-by: Chunming Zhou Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 79 ++++++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2ea91392290c..11f49b81f653 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -391,6 +391,72 @@ static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev, atomic_read(&adev->gpu_reset_counter); } +static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub) +{ + return !!vm->reserved_vmid[vmhub]; +} + +/* idr_mgr->lock must be held */ +static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, + struct amdgpu_ring *ring, + struct amdgpu_sync *sync, + struct dma_fence *fence, + struct amdgpu_job *job) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmhub = ring->funcs->vmhub; + uint64_t fence_context = adev->fence_context + ring->idx; + struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub]; + struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct dma_fence *updates = sync->last_vm_update; + int r = 0; + struct dma_fence *flushed, *tmp; + bool needs_flush = false; + + flushed = id->flushed_updates; + if ((amdgpu_vm_had_gpu_reset(adev, id)) || + (atomic64_read(&id->owner) != vm->client_id) || + (job->vm_pd_addr != id->pd_gpu_addr) || + (updates && (!flushed || updates->context != flushed->context || + dma_fence_is_later(updates, flushed))) || + (!id->last_flush || (id->last_flush->context != fence_context && + !dma_fence_is_signaled(id->last_flush)))) { + needs_flush = true; + /* to prevent one context starved by another context */ + id->pd_gpu_addr = 0; + tmp = amdgpu_sync_peek_fence(&id->active, ring); + if (tmp) { + r = amdgpu_sync_fence(adev, sync, tmp); + return r; + } + } + + /* Good we can use this VMID. Remember this submission as + * user of the VMID. 
+ */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence); + if (r) + goto out; + + if (updates && (!flushed || updates->context != flushed->context || + dma_fence_is_later(updates, flushed))) { + dma_fence_put(id->flushed_updates); + id->flushed_updates = dma_fence_get(updates); + } + id->pd_gpu_addr = job->vm_pd_addr; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); + atomic64_set(&id->owner, vm->client_id); + job->vm_needs_flush = needs_flush; + if (needs_flush) { + dma_fence_put(id->last_flush); + id->last_flush = NULL; + } + job->vm_id = id - id_mgr->ids; + trace_amdgpu_vm_grab_id(vm, ring, job); +out: + return r; +} + /** * amdgpu_vm_grab_id - allocate the next free VMID * @@ -415,12 +481,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, unsigned i; int r = 0; + mutex_lock(&id_mgr->lock); + if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) { + r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job); + mutex_unlock(&id_mgr->lock); + return r; + } fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL); - if (!fences) + if (!fences) { + mutex_unlock(&id_mgr->lock); return -ENOMEM; - - mutex_lock(&id_mgr->lock); - + } /* Check if we have an idle VMID */ i = 0; list_for_each_entry(idle, &id_mgr->ids_lru, list) { -- cgit From b9bf33d5ac55aa9f23b60b4d03017b2e59d02f02 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Thu, 11 May 2017 14:52:48 -0400 Subject: drm/amdgpu: make pipeline sync be in same place v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: directly return for 'if' case. Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 +++++++++++++++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 ++ 5 files changed, 34 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 37bd00345ae0..e2cafbd690c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1132,7 +1132,6 @@ struct amdgpu_job { void *owner; uint64_t fence_ctx; /* the fence_context this job uses */ bool vm_needs_flush; - bool need_pipeline_sync; unsigned vm_id; uint64_t vm_pd_addr; uint32_t gds_base, gds_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 631a9f77b973..cb9472f52e8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -121,7 +121,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; - struct dma_fence *tmp; + struct dma_fence *tmp = NULL; bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; @@ -163,8 +163,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } if (ring->funcs->emit_pipeline_sync && job && - (tmp = amdgpu_sync_get_fence(&job->sched_sync))) { - job->need_pipeline_sync = true; + ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || + amdgpu_vm_need_pipeline_sync(ring, job))) { amdgpu_ring_emit_pipeline_sync(ring); dma_fence_put(tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 
4af92649c4a4..874979cc3207 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -57,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, (*job)->vm = vm; (*job)->ibs = (void *)&(*job)[1]; (*job)->num_ibs = num_ibs; - (*job)->need_pipeline_sync = false; amdgpu_sync_create(&(*job)->sync); amdgpu_sync_create(&(*job)->sched_sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 11f49b81f653..017258d6b9d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -694,6 +694,35 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) return addr; } +bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, + struct amdgpu_job *job) +{ + struct amdgpu_device *adev = ring->adev; + unsigned vmhub = ring->funcs->vmhub; + struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; + struct amdgpu_vm_id *id; + bool gds_switch_needed; + bool vm_flush_needed = job->vm_needs_flush || + amdgpu_vm_ring_has_compute_vm_bug(ring); + + if (job->vm_id == 0) + return false; + id = &id_mgr->ids[job->vm_id]; + gds_switch_needed = ring->funcs->emit_gds_switch && ( + id->gds_base != job->gds_base || + id->gds_size != job->gds_size || + id->gws_base != job->gws_base || + id->gws_size != job->gws_size || + id->oa_base != job->oa_base || + id->oa_size != job->oa_size); + + if (amdgpu_vm_had_gpu_reset(adev, id)) + return true; + if (!vm_flush_needed && !gds_switch_needed) + return false; + return true; +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -732,9 +761,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) if (ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); - if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync) - amdgpu_ring_emit_pipeline_sync(ring); - if (ring->funcs->emit_vm_flush && vm_flush_needed) { u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr); struct dma_fence *fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index fb25bb747730..9c7b15925aa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -245,5 +245,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, + struct amdgpu_job *job); #endif -- cgit From bea396726d004df08a7d08972d5eeb792857c8bc Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 10 May 2017 13:02:39 +0800 Subject: drm/amdgpu: id reset count only is updated when used end v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit before that, we have function to check if reset happens by using reset count. 
v2: always update reset count after vm flush Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 017258d6b9d7..0da8a3005f6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -444,7 +444,6 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, id->flushed_updates = dma_fence_get(updates); } id->pd_gpu_addr = job->vm_pd_addr; - id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); atomic64_set(&id->owner, vm->client_id); job->vm_needs_flush = needs_flush; if (needs_flush) { @@ -592,7 +591,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id->pd_gpu_addr = job->vm_pd_addr; dma_fence_put(id->flushed_updates); id->flushed_updates = dma_fence_get(updates); - id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); atomic64_set(&id->owner, vm->client_id); needs_flush: @@ -775,6 +773,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) mutex_lock(&id_mgr->lock); dma_fence_put(id->last_flush); id->last_flush = fence; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); mutex_unlock(&id_mgr->lock); } -- cgit From de37e68a3dc6d526a5e42c393433eb66ac23cee4 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Thu, 18 May 2017 13:56:22 +0800 Subject: drm/amdgpu: fix ocl test performance drop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit partial revert commit <6971d3d> - drm/amdgpu: cleanup logic in amdgpu_vm_flush Signed-off-by: Flora Cui Reviewed-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0da8a3005f6f..4df54278993a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -743,8 +743,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) id->gws_size != job->gws_size || id->oa_base != job->oa_base || id->oa_size != job->oa_size); - bool vm_flush_needed = job->vm_needs_flush || - amdgpu_vm_ring_has_compute_vm_bug(ring); + bool vm_flush_needed = job->vm_needs_flush; unsigned patch_offset = 0; int r; -- cgit From 9a94f5a593c05c08ee309a55f618973427c5e074 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 12 May 2017 14:46:23 +0200 Subject: drm/amdgpu: move adjust adjust_mc_addr into the GFX9 vm_flush functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That GFX9 needs a PDE in the registers is entirely GFX9 specific. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 + drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 1 + 5 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4df54278993a..3ecde81821ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -759,11 +759,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) patch_offset = amdgpu_ring_init_cond_exec(ring); if (ring->funcs->emit_vm_flush && vm_flush_needed) { - u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr); struct dma_fence *fence; - trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr); - amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr); + trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); r = amdgpu_fence_emit(ring, &fence); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9c6bd99babc9..e7130cdcd92f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3771,6 +3771,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; + pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); pd_addr = pd_addr | 0x1; /* valid bit */ /* now only use physical base address of PDE and valid */ BUG_ON(pd_addr & 0xFFFF00000000003EULL); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 76cb766b93c3..332f2fdde5d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1143,6 +1143,7 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; + pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); pd_addr = pd_addr | 0x1; /* valid bit */ /* now only use physical base address of PDE and valid */ BUG_ON(pd_addr & 0xFFFF00000000003EULL); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index aaa3662ca63a..74e3f23ac5e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1316,6 +1316,7 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t data0, data1, mask; unsigned eng = ring->vm_inv_eng; + pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); pd_addr = pd_addr | 0x1; /* valid bit */ /* now only use physical base address of PDE and valid */ BUG_ON(pd_addr & 0xFFFF00000000003EULL); @@ -1357,6 +1358,7 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; + pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); pd_addr = pd_addr | 0x1; /* valid bit */ /* now only use physical base address of PDE and valid */ BUG_ON(pd_addr & 0xFFFF00000000003EULL); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 77f1b60ab416..0012835e85c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -926,6 +926,7 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; + pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); pd_addr = pd_addr | 0x1; /* valid bit */ /* now only use physical base address of PDE and valid */ BUG_ON(pd_addr & 0xFFFF00000000003EULL); -- cgit From b116632557a565dfdc2b7e5f8d67661a3ac3f835 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 12 May 2017 15:39:39 +0200 Subject: drm/amdgpu: cleanup adjust_mc_addr handling v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename adjust_mc_addr to get_vm_pde and check the address bits in one place. v2: handle vcn as well, keep setting the valid bit manually, add a BUG_ON() for GMC v6, v7 and v8 as well. v3: handle vcn_v1_0_enc_ring_emit_vm_flush as well. v4: fix the BUG_ON mask for GFX6-8 Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26 +++++++++----------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 ++++++---- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 ++++-------- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 12 ++++-------- 11 files changed, 55 insertions(+), 53 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a3576dbefa0f..abf5a58edc82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -308,8 +308,8 @@ struct amdgpu_gart_funcs { /* set pte flags based per asic */ uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev, uint32_t flags); - /* adjust mc addr in fb for APU case */ - u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr); + /* get the pde for a given mc addr */ + u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr); uint32_t (*get_invalidate_req)(unsigned int vm_id); }; @@ -1813,6 +1813,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) +#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3ecde81821ad..c11903257b94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -682,16 +682,6 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) return false; } -static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) -{ - u64 addr = mc_addr; - - if (adev->gart.gart_funcs->adjust_mc_addr) - addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr); - - return addr; -} - bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job) { @@ -1033,18 +1023,18 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { - uint64_t pt_addr = - amdgpu_vm_adjust_mc_addr(adev, last_pt); + uint64_t entry; + entry = amdgpu_gart_get_vm_pde(adev, last_pt); if (shadow) amdgpu_vm_do_set_ptes(¶ms, last_shadow, - pt_addr, count, + entry, count, incr, AMDGPU_PTE_VALID); amdgpu_vm_do_set_ptes(¶ms, last_pde, - pt_addr, count, incr, + entry, count, incr, AMDGPU_PTE_VALID); } @@ -1058,13 +1048,15 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, } if (count) { - uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt); + uint64_t entry; + + entry = amdgpu_gart_get_vm_pde(adev, last_pt); if (vm->root.bo->shadow) - amdgpu_vm_do_set_ptes(¶ms, last_shadow, pt_addr, + amdgpu_vm_do_set_ptes(¶ms, last_shadow, entry, count, incr, AMDGPU_PTE_VALID); - amdgpu_vm_do_set_ptes(¶ms, last_pde, pt_addr, + amdgpu_vm_do_set_ptes(¶ms, last_pde, entry, count, incr, AMDGPU_PTE_VALID); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ec891b3f4a82..f97fc0dafc36 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3832,10 +3832,8 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; gfx_v9_0_write_data_to_reg(ring, usepfp, true, hub->ctx0_ptb_addr_lo32 + (2 * vm_id), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d07ec13b42a8..569828ced31d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -395,6 +395,12 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev, return pte_flag; } +static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr) +{ + BUG_ON(addr & 0xFFFFFF0000000FFFULL); + return addr; +} + static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { @@ -1121,6 +1127,7 @@ static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = { .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb, .set_pte_pde = gmc_v6_0_gart_set_pte_pde, .set_prt = gmc_v6_0_set_prt, + .get_vm_pde = gmc_v6_0_get_vm_pde, .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 967505bd2fc8..8b39d9a4f801 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -472,6 +472,12 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct 
amdgpu_device *adev, return pte_flag; } +static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr) +{ + BUG_ON(addr & 0xFFFFFF0000000FFFULL); + return addr; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1293,7 +1299,8 @@ static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = { .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb, .set_pte_pde = gmc_v7_0_gart_set_pte_pde, .set_prt = gmc_v7_0_set_prt, - .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags + .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags, + .get_vm_pde = gmc_v7_0_get_vm_pde }; static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 3b5ea0f52d89..73a9653b4e60 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -656,6 +656,12 @@ static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev, return pte_flag; } +static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr) +{ + BUG_ON(addr & 0xFFFFFF0000000FFFULL); + return addr; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1612,7 +1618,8 @@ static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = { .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb, .set_pte_pde = gmc_v8_0_gart_set_pte_pde, .set_prt = gmc_v8_0_set_prt, - .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags + .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags, + .get_vm_pde = gmc_v8_0_get_vm_pde }; static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 19e10276e585..047b1a7d20b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -358,17 +358,19 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, return pte_flag; } -static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) +static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr) { - return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start; + addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start; + BUG_ON(addr & 0xFFFF00000000003FULL); + return addr; } static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = { .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb, .set_pte_pde = gmc_v9_0_gart_set_pte_pde, - .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, - .adjust_mc_addr = gmc_v9_0_adjust_mc_addr, .get_invalidate_req = gmc_v9_0_get_invalidate_req, + .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, + .get_vm_pde = gmc_v9_0_get_vm_pde }; static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 9cad3b118899..4a65697ccc94 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1124,10 +1124,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | 
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 74e3f23ac5e0..dd9ec81f116d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1316,10 +1316,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t data0, data1, mask; unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; data1 = upper_32_bits(pd_addr); @@ -1358,10 +1356,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 0012835e85c6..0b7fcc1b6c00 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -926,10 +926,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index e8c8c78e389a..ec33e8fa83c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -882,10 +882,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t data0, data1, mask; unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; data1 = upper_32_bits(pd_addr); @@ -1015,10 +1013,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->vm_inv_eng; - pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr); - pd_addr = pd_addr | 0x1; /* valid bit */ - /* now only use physical base address of PDE and valid */ - BUG_ON(pd_addr & 0xFFFF00000000003EULL); + pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr); + pd_addr |= AMDGPU_PTE_VALID; 
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, -- cgit From 92456b933cd257845587697a0875878849cef973 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 12 May 2017 16:09:26 +0200 Subject: drm/amdgpu: add some extra VM error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If updating the PDs fails we now invalidate all entries to try again later. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c11903257b94..6af2d3c56f38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1103,6 +1103,32 @@ error_free: return r; } +/* + * amdgpu_vm_invalidate_level - mark all PD levels as invalid + * + * @parent: parent PD + * + * Mark all PD level as invalid after an error. + */ +static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) +{ + unsigned pt_idx; + + /* + * Recurse into the subdirectories. This recursion is harmless because + * we only have a maximum of 5 layers. + */ + for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { + struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; + + if (!entry->bo) + continue; + + entry->addr = ~0ULL; + amdgpu_vm_invalidate_level(entry); + } +} + /* * amdgpu_vm_update_directories - make sure that all directories are valid * @@ -1115,7 +1141,13 @@ error_free: int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - return amdgpu_vm_update_level(adev, vm, &vm->root, 0); + int r; + + r = amdgpu_vm_update_level(adev, vm, &vm->root, 0); + if (r) + amdgpu_vm_invalidate_level(&vm->root); + + return r; } /** -- cgit From cc28c4ed7e735fc09043054b22370202583f69c3 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Thu, 11 May 2017 22:39:31 -0400 Subject: drm/amdgpu: Return EINVAL if no PT BO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change is also useful for the upcoming changes where page tables can be updated by CPU. Signed-off-by: Harish Kasiviswanathan Reviewed-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 48 +++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6af2d3c56f38..b2384b8536b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1187,8 +1187,9 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, * @flags: mapping flags * * Update the page tables in the range @start - @end. + * Returns 0 for success, -EINVAL for failure. 
*/ -static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, +static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags) { @@ -1206,12 +1207,12 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, pt = amdgpu_vm_get_pt(params, addr); if (!pt) { pr_err("PT not found, aborting update_ptes\n"); - return; + return -EINVAL; } if (params->shadow) { if (!pt->shadow) - return; + return 0; pt = pt->shadow; } if ((addr & ~mask) == (end & ~mask)) @@ -1233,12 +1234,12 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, pt = amdgpu_vm_get_pt(params, addr); if (!pt) { pr_err("PT not found, aborting update_ptes\n"); - return; + return -EINVAL; } if (params->shadow) { if (!pt->shadow) - return; + return 0; pt = pt->shadow; } @@ -1273,6 +1274,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, params->func(params, cur_pe_start, cur_dst, cur_nptes, AMDGPU_GPU_PAGE_SIZE, flags); + + return 0; } /* @@ -1284,11 +1287,14 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, * @end: last PTE to handle * @dst: addr those PTEs should point to * @flags: hw mapping flags + * Returns 0 for success, -EINVAL for failure. */ -static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, +static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags) { + int r; + /** * The MC L1 TLB supports variable sized pages, based on a fragment * field in the PTE. When this field is set to a non-zero value, page @@ -1317,28 +1323,30 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, /* system pages are non continuously */ if (params->src || !(flags & AMDGPU_PTE_VALID) || - (frag_start >= frag_end)) { - - amdgpu_vm_update_ptes(params, start, end, dst, flags); - return; - } + (frag_start >= frag_end)) + return amdgpu_vm_update_ptes(params, start, end, dst, flags); /* handle the 4K area at the beginning */ if (start != frag_start) { - amdgpu_vm_update_ptes(params, start, frag_start, - dst, flags); + r = amdgpu_vm_update_ptes(params, start, frag_start, + dst, flags); + if (r) + return r; dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; } /* handle the area in the middle */ - amdgpu_vm_update_ptes(params, frag_start, frag_end, dst, - flags | frag_flags); + r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst, + flags | frag_flags); + if (r) + return r; /* handle the 4K area at the end */ if (frag_end != end) { dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; - amdgpu_vm_update_ptes(params, frag_end, end, dst, flags); + r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags); } + return r; } /** @@ -1459,9 +1467,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, goto error_free; params.shadow = true; - amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); + r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); + if (r) + goto error_free; params.shadow = false; - amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); + r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); + if (r) + goto error_free; amdgpu_ring_pad_ib(ring, params.ib); WARN_ON(params.ib->length_dw > ndw); -- cgit From dd684d313e280c3bad2ebb7b33e7688ab5409bc9 Mon Sep 17 00:00:00 2001 From: Alex Xie Date: Tue, 30 May 2017 17:10:16 -0400 Subject: drm/amdgpu: Optimize a function called by every IB sheduling Move several if 
statements and a loop statment from run time to initialization time. Signed-off-by: Alex Xie Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 33 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 +-------------------------- 3 files changed, 40 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 6a85db0c0bc3..7d95435fad16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -152,6 +152,36 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) ring->funcs->end_use(ring); } +/** + * amdgpu_ring_check_compute_vm_bug - check whether this ring has compute vm bug + * + * @adev: amdgpu_device pointer + * @ring: amdgpu_ring structure holding ring information + */ +static void amdgpu_ring_check_compute_vm_bug(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + const struct amdgpu_ip_block *ip_block; + + ring->has_compute_vm_bug = false; + + if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) + /* only compute rings */ + return; + + ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (!ip_block) + return; + + /* Compute ring has a VM bug for GFX version < 7. + And compute ring has a VM bug for GFX 8 MEC firmware version < 673.*/ + if (ip_block->version->major <= 7) { + ring->has_compute_vm_bug = true; + } else if (ip_block->version->major == 8) + if (adev->gfx.mec_fw_version < 673) + ring->has_compute_vm_bug = true; +} + /** * amdgpu_ring_init - init driver ring struct. * @@ -257,6 +287,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, if (amdgpu_debugfs_ring_init(adev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); } + + amdgpu_ring_check_compute_vm_bug(adev, ring); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index a9223a8de8c2..334307efac8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -185,6 +185,7 @@ struct amdgpu_ring { u64 cond_exe_gpu_addr; volatile u32 *cond_exe_cpu_addr; unsigned vm_inv_eng; + bool has_compute_vm_bug; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; #endif @@ -207,4 +208,9 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) } +static inline bool amdgpu_ring_has_compute_vm_bug(struct amdgpu_ring *ring) +{ + return ring->has_compute_vm_bug; +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b2384b8536b9..7a323f91a10b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -656,32 +656,6 @@ unlock: return r; } -static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - const struct amdgpu_ip_block *ip_block; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - /* only compute rings */ - return false; - - ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); - if (!ip_block) - return false; - - if (ip_block->version->major <= 7) { - /* gfx7 has no workaround */ - return true; - } else if (ip_block->version->major == 8) { - if (adev->gfx.mec_fw_version >= 673) - /* gfx8 is fixed in MEC firmware 673 */ - return false; - else - return true; - } - return false; -} - bool 
amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job) { @@ -691,7 +665,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_vm_id *id; bool gds_switch_needed; bool vm_flush_needed = job->vm_needs_flush || - amdgpu_vm_ring_has_compute_vm_bug(ring); + amdgpu_ring_has_compute_vm_bug(ring); if (job->vm_id == 0) return false; -- cgit From bb37b67d5729926ef0bf3dafdb5521b932aeb809 Mon Sep 17 00:00:00 2001 From: Alex Xie Date: Tue, 30 May 2017 23:50:10 -0400 Subject: drm/amdgpu: Remove two ! operations in an if condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the code easier to understand. Signed-off-by: Alex Xie Reviewed-by: Michel Dänzer Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7a323f91a10b..90392a15fcb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -680,9 +680,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, if (amdgpu_vm_had_gpu_reset(adev, id)) return true; - if (!vm_flush_needed && !gds_switch_needed) - return false; - return true; + + return vm_flush_needed || gds_switch_needed; } /** -- cgit From 53e2e91ddad79f9d42bab5a69ef293a1f1f5d6d7 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 15 May 2017 15:19:10 +0200 Subject: drm/amdgpu: cache the complete pde MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Makes it easier to update the PDE with huge pages. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 90392a15fcb7..3d2ad3ae04bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -985,6 +985,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, } pt = amdgpu_bo_gpu_offset(bo); + pt = amdgpu_gart_get_vm_pde(adev, pt); if (parent->entries[pt_idx].addr == pt) continue; @@ -996,18 +997,15 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { - uint64_t entry; - - entry = amdgpu_gart_get_vm_pde(adev, last_pt); if (shadow) amdgpu_vm_do_set_ptes(¶ms, last_shadow, - entry, count, + last_pt, count, incr, AMDGPU_PTE_VALID); amdgpu_vm_do_set_ptes(¶ms, last_pde, - entry, count, incr, + last_pt, count, incr, AMDGPU_PTE_VALID); } @@ -1021,15 +1019,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, } if (count) { - uint64_t entry; - - entry = amdgpu_gart_get_vm_pde(adev, last_pt); - if (vm->root.bo->shadow) - amdgpu_vm_do_set_ptes(¶ms, last_shadow, entry, + amdgpu_vm_do_set_ptes(¶ms, last_shadow, last_pt, count, incr, AMDGPU_PTE_VALID); - amdgpu_vm_do_set_ptes(¶ms, last_pde, entry, + amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, count, incr, AMDGPU_PTE_VALID); } -- cgit From 301654a4f6c4a0cfcee14a4b2737165ecbd51ce8 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 16 May 2017 14:30:27 +0200 Subject: drm/amdgpu: stop joining VM PTE updates 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This isn't beneficial any more since VRAM allocations are now split so that they fits into a single page table. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 61 ++++------------------------------ 1 file changed, 7 insertions(+), 54 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3d2ad3ae04bc..d4d05a819603 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1163,41 +1163,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_device *adev = params->adev; const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; - uint64_t cur_pe_start, cur_nptes, cur_dst; - uint64_t addr; /* next GPU address to be updated */ + uint64_t addr, pe_start; struct amdgpu_bo *pt; - unsigned nptes; /* next number of ptes to be updated */ - uint64_t next_pe_start; - - /* initialize the variables */ - addr = start; - pt = amdgpu_vm_get_pt(params, addr); - if (!pt) { - pr_err("PT not found, aborting update_ptes\n"); - return -EINVAL; - } - - if (params->shadow) { - if (!pt->shadow) - return 0; - pt = pt->shadow; - } - if ((addr & ~mask) == (end & ~mask)) - nptes = end - addr; - else - nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); - - cur_pe_start = amdgpu_bo_gpu_offset(pt); - cur_pe_start += (addr & mask) * 8; - cur_nptes = nptes; - cur_dst = dst; - - /* for next ptb*/ - addr += nptes; - dst += nptes * AMDGPU_GPU_PAGE_SIZE; + unsigned nptes; /* walk over the address space and update the page tables */ - while (addr < end) { + for (addr = start; addr < end; addr += nptes) { pt = amdgpu_vm_get_pt(params, addr); if (!pt) { pr_err("PT not found, aborting update_ptes\n"); @@ -1215,33 +1186,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); - next_pe_start = amdgpu_bo_gpu_offset(pt); - next_pe_start += (addr & mask) * 8; - - if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && - ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { - /* The next ptb is consecutive to current ptb. - * Don't call the update function now. - * Will update two ptbs together in future. - */ - cur_nptes += nptes; - } else { - params->func(params, cur_pe_start, cur_dst, cur_nptes, - AMDGPU_GPU_PAGE_SIZE, flags); + pe_start = amdgpu_bo_gpu_offset(pt); + pe_start += (addr & mask) * 8; - cur_pe_start = next_pe_start; - cur_nptes = nptes; - cur_dst = dst; - } + params->func(params, pe_start, dst, nptes, + AMDGPU_GPU_PAGE_SIZE, flags); - /* for next ptb*/ - addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - params->func(params, cur_pe_start, cur_dst, cur_nptes, - AMDGPU_GPU_PAGE_SIZE, flags); - return 0; } -- cgit From e59c020598666ffc22c627910667e44ac2412304 Mon Sep 17 00:00:00 2001 From: Alex Xie Date: Thu, 1 Jun 2017 09:42:59 -0400 Subject: drm/amdgpu: Move compute vm bug logic to amdgpu_vm.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In review, Christian would like to keep the logic inside amdgpu_vm.c with a cost of slightly slower. The loop is still optimized out with this patch. v2: remove the if statement. Now it is not slower. 
Signed-off-by: Alex Xie Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 32 ------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5 ---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 38 ++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 + 5 files changed, 39 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index cce94d836221..01ee69b37a2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2190,6 +2190,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->accel_working = true; + amdgpu_vm_check_compute_bug(adev); + /* Initialize the buffer migration limit. */ if (amdgpu_moverate >= 0) max_MBps = amdgpu_moverate; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index cef135ef7334..75165e07b1cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -154,36 +154,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) ring->funcs->end_use(ring); } -/** - * amdgpu_ring_check_compute_vm_bug - check whether this ring has compute vm bug - * - * @adev: amdgpu_device pointer - * @ring: amdgpu_ring structure holding ring information - */ -static void amdgpu_ring_check_compute_vm_bug(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - const struct amdgpu_ip_block *ip_block; - - ring->has_compute_vm_bug = false; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - /* only compute rings */ - return; - - ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); - if (!ip_block) - return; - - /* Compute ring has a VM bug for GFX version < 7. - And compute ring has a VM bug for GFX 8 MEC firmware version < 673.*/ - if (ip_block->version->major <= 7) { - ring->has_compute_vm_bug = true; - } else if (ip_block->version->major == 8) - if (adev->gfx.mec_fw_version < 673) - ring->has_compute_vm_bug = true; -} - /** * amdgpu_ring_init - init driver ring struct. 
* @@ -292,8 +262,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, DRM_ERROR("Failed to register debugfs file for rings !\n"); } - amdgpu_ring_check_compute_vm_bug(adev, ring); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index a1dd9077c064..bc8dec992f73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -212,9 +212,4 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) } -static inline bool amdgpu_ring_has_compute_vm_bug(struct amdgpu_ring *ring) -{ - return ring->has_compute_vm_bug; -} - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d4d05a819603..6e32748d224e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -656,6 +656,41 @@ unlock: return r; } +/** + * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug + * + * @adev: amdgpu_device pointer + */ +void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) +{ + const struct amdgpu_ip_block *ip_block; + bool has_compute_vm_bug; + struct amdgpu_ring *ring; + int i; + + has_compute_vm_bug = false; + + ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (ip_block) { + /* Compute has a VM bug for GFX version < 7. + Compute has a VM bug for GFX 8 MEC firmware version < 673.*/ + if (ip_block->version->major <= 7) + has_compute_vm_bug = true; + else if (ip_block->version->major == 8) + if (adev->gfx.mec_fw_version < 673) + has_compute_vm_bug = true; + } + + for (i = 0; i < adev->num_rings; i++) { + ring = adev->rings[i]; + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + /* only compute rings */ + ring->has_compute_vm_bug = has_compute_vm_bug; + else + ring->has_compute_vm_bug = false; + } +} + bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job) { @@ -664,8 +699,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vm_id *id; bool gds_switch_needed; - bool vm_flush_needed = job->vm_needs_flush || - amdgpu_ring_has_compute_vm_bug(ring); + bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; if (job->vm_id == 0) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 8309bc7ffd71..f5dba9cea587 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -245,5 +245,6 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job); +void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); #endif -- cgit From 9a4b7d4c769e7513dec8f441de1f521ec4ead4b6 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Fri, 9 Jun 2017 11:26:57 -0400 Subject: drm/amdgpu: Add vm context module param MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add VM update mode module param (amdgpu.vm_update_mode) that can used to control how VM pde/pte are updated for Graphics and Compute. BIT0 controls Graphics and BIT1 Compute. 
BIT0 [= 0] Graphics updated by SDMA [= 1] by CPU BIT1 [= 0] Compute updated by SDMA [= 1] by CPU By default, only for large BAR system vm_update_mode = 2, indicating that Graphics VMs will be updated via SDMA and Compute VMs will be updated via CPU. And for all all other systems (by default) vm_update_mode = 0 Signed-off-by: Harish Kasiviswanathan Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 36 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 20 +++++++++++++++++- 5 files changed, 61 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3a0561cc7ed1..1ae9ca239188 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -95,6 +95,7 @@ extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; +extern int amdgpu_vm_update_mode; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_no_evict; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9c6cdfd45e49..4d2bad404100 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -95,6 +95,7 @@ int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; int amdgpu_vram_page_split = 512; +int amdgpu_vm_update_mode = -1; int amdgpu_exp_hw_support = 0; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; @@ -181,6 +182,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444); MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)"); module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); +MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both"); +module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444); + MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)"); module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 92e93b34118c..12497a40ef92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -821,7 +821,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto out_suspend; } - r = amdgpu_vm_init(adev, &fpriv->vm); + r = amdgpu_vm_init(adev, &fpriv->vm, + AMDGPU_VM_CONTEXT_GFX); if (r) { kfree(fpriv); goto out_suspend; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6e32748d224e..2db10b665381 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -718,6 +718,11 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, return vm_flush_needed || gds_switch_needed; } +static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) +{ + return (adev->mc.real_vram_size == adev->mc.visible_vram_size); +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -2268,10 +2273,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t 
vm_size) * * @adev: amdgpu_device pointer * @vm: requested vm + * @vm_context: Indicates if it GFX or Compute context * * Init @vm fields. */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, + int vm_context) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT(adev) * 8); @@ -2300,6 +2307,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (r) return r; + if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) + vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & + AMDGPU_VM_USE_CPU_FOR_COMPUTE); + else + vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & + AMDGPU_VM_USE_CPU_FOR_GFX); + DRM_DEBUG_DRIVER("VM update mode is %s\n", + vm->use_cpu_for_update ? "CPU" : "SDMA"); + WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)), + "CPU update of VM recommended only for large BAR system\n"); vm->last_dir_update = NULL; r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, @@ -2432,6 +2449,23 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) atomic64_set(&adev->vm_manager.client_counter, 0); spin_lock_init(&adev->vm_manager.prt_lock); atomic_set(&adev->vm_manager.num_prt_users, 0); + + /* If not overridden by the user, by default, only in large BAR systems + * Compute VM tables will be updated by CPU + */ +#ifdef CONFIG_X86_64 + if (amdgpu_vm_update_mode == -1) { + if (amdgpu_vm_is_large_bar(adev)) + adev->vm_manager.vm_update_mode = + AMDGPU_VM_USE_CPU_FOR_COMPUTE; + else + adev->vm_manager.vm_update_mode = 0; + } else + adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; +#else + adev->vm_manager.vm_update_mode = 0; +#endif + } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index f5dba9cea587..936f158bc5ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -87,6 +87,14 @@ struct amdgpu_bo_list_entry; /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 +#define AMDGPU_VM_CONTEXT_GFX 0 +#define AMDGPU_VM_CONTEXT_COMPUTE 1 + +/* See vm_update_mode */ +#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) +#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) + + struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; @@ -129,6 +137,9 @@ struct amdgpu_vm { struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; /* each VM will map on CSA */ struct amdgpu_bo_va *csa_bo_va; + + /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ + bool use_cpu_for_update; }; struct amdgpu_vm_id { @@ -184,11 +195,18 @@ struct amdgpu_vm_manager { /* partial resident texture handling */ spinlock_t prt_lock; atomic_t num_prt_users; + + /* controls how VM page tables are updated for Graphics and Compute. 
+ * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU + * BIT1[= 0] Compute updated by SDMA [= 1] by CPU + */ + int vm_update_mode; }; void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, + int vm_context); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, -- cgit From 3c8241722bc4f5879db42d3acd7fa840c8e608e7 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Thu, 11 May 2017 15:50:08 -0400 Subject: drm/amdgpu: Support page directory update via CPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If amdgpu.vm_update_context param is set to use CPU, then Page Directories will be updated by CPU instead of SDMA v2: Call amdgpu_vm_bo_wait before updating the page tables to ensure the PD/PT BOs are free v3: Minor changes - due to amdgpu_vm_bo_wait() prototype change, local variable declaration order and function comments. Signed-off-by: Harish Kasiviswanathan Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 161 ++++++++++++++++++++++++--------- 1 file changed, 119 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2db10b665381..caca6896476c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -275,6 +275,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, adev->vm_manager.block_size; unsigned pt_idx, from, to; int r; + u64 flags; if (!parent->entries) { unsigned num_entries = amdgpu_vm_num_entries(adev, level); @@ -299,6 +300,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, saddr = saddr & ((1 << shift) - 1); eaddr = eaddr & ((1 << shift) - 1); + flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_VRAM_CLEARED; + if (vm->use_cpu_for_update) + flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + else + flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW); + /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { struct reservation_object *resv = vm->root.bo->tbo.resv; @@ -310,10 +319,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, amdgpu_vm_bo_size(adev, level), AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS | - AMDGPU_GEM_CREATE_SHADOW | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED, + flags, NULL, resv, &pt); if (r) return r; @@ -948,6 +954,49 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) return result; } +/** + * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU + * + * @params: see amdgpu_pte_update_params definition + * @pe: kmap addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Write count number of PT/PD entries directly. 
+ */ +static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint64_t flags) +{ + unsigned int i; + + for (i = 0; i < count; i++) { + amdgpu_gart_set_pte_pde(params->adev, (void *)pe, + i, addr, flags); + addr += incr; + } + + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(params->adev, 0); +} + +static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo) +{ + struct amdgpu_sync sync; + int r; + + amdgpu_sync_create(&sync); + amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + r = amdgpu_sync_wait(&sync, true); + amdgpu_sync_free(&sync); + + return r; +} + /* * amdgpu_vm_update_level - update a single level in the hierarchy * @@ -977,34 +1026,54 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (!parent->entries) return 0; - ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - /* padding, etc. */ - ndw = 64; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + shadow = parent->bo->shadow; - /* assume the worst case */ - ndw += parent->last_entry_used * 6; + WARN_ON(vm->use_cpu_for_update && shadow); + if (vm->use_cpu_for_update && !shadow) { + r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr); + if (r) + return r; + r = amdgpu_vm_bo_wait(adev, parent->bo); + if (unlikely(r)) { + amdgpu_bo_kunmap(parent->bo); + return r; + } + params.func = amdgpu_vm_cpu_set_ptes; + } else { + if (shadow) { + r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); + if (r) + return r; + } + ring = container_of(vm->entity.sched, struct amdgpu_ring, + sched); - pd_addr = amdgpu_bo_gpu_offset(parent->bo); + /* padding, etc. */ + ndw = 64; - shadow = parent->bo->shadow; - if (shadow) { - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); + /* assume the worst case */ + ndw += parent->last_entry_used * 6; + + pd_addr = amdgpu_bo_gpu_offset(parent->bo); + + if (shadow) { + shadow_addr = amdgpu_bo_gpu_offset(shadow); + ndw *= 2; + } else { + shadow_addr = 0; + } + + r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); if (r) return r; - shadow_addr = amdgpu_bo_gpu_offset(shadow); - ndw *= 2; - } else { - shadow_addr = 0; - } - r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); - if (r) - return r; + params.ib = &job->ibs[0]; + params.func = amdgpu_vm_do_set_ptes; + } - memset(¶ms, 0, sizeof(params)); - params.adev = adev; - params.ib = &job->ibs[0]; /* walk over the address space and update the directory */ for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) { @@ -1037,15 +1106,15 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (count) { if (shadow) - amdgpu_vm_do_set_ptes(¶ms, - last_shadow, - last_pt, count, - incr, - AMDGPU_PTE_VALID); - - amdgpu_vm_do_set_ptes(¶ms, last_pde, - last_pt, count, incr, - AMDGPU_PTE_VALID); + params.func(¶ms, + last_shadow, + last_pt, count, + incr, + AMDGPU_PTE_VALID); + + params.func(¶ms, last_pde, + last_pt, count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -1059,14 +1128,16 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (count) { if (vm->root.bo->shadow) - amdgpu_vm_do_set_ptes(¶ms, last_shadow, last_pt, - count, incr, AMDGPU_PTE_VALID); + params.func(¶ms, last_shadow, last_pt, + count, incr, AMDGPU_PTE_VALID); - amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + params.func(¶ms, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); } - if (params.ib->length_dw == 0) { + if (params.func == amdgpu_vm_cpu_set_ptes) + 
amdgpu_bo_kunmap(parent->bo); + else if (params.ib->length_dw == 0) { amdgpu_job_free(job); } else { amdgpu_ring_pad_ib(ring, params.ib); @@ -2286,6 +2357,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_ring *ring; struct amd_sched_rq *rq; int r, i; + u64 flags; vm->va = RB_ROOT; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); @@ -2319,12 +2391,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, "CPU update of VM recommended only for large BAR system\n"); vm->last_dir_update = NULL; + flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_VRAM_CLEARED; + if (vm->use_cpu_for_update) + flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + else + flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW); + r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS | - AMDGPU_GEM_CREATE_SHADOW | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED, + flags, NULL, NULL, &vm->root.bo); if (r) goto error_free_sched_entity; -- cgit From b4d42511b7f2269f4cca3c02cd6e4c58099f9108 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Thu, 11 May 2017 19:47:22 -0400 Subject: drm/amdgpu: Support page table update via CPU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Fix logical mistake. If CPU update failed amdgpu_vm_bo_update_mapping() would not return and instead fall through to SDMA update. Minor change due to amdgpu_vm_bo_wait() prototype change Signed-off-by: Harish Kasiviswanathan Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 88 +++++++++++++++++++++++++++++++++- 1 file changed, 87 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index caca6896476c..c4f1a305c68c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -79,6 +79,12 @@ struct amdgpu_pte_update_params { uint64_t flags); /* indicate update pt or its shadow */ bool shadow; + /* The next two are used during VM update by CPU + * DMA addresses to use for mapping + * Kernel pointer of PD/PT BO that needs to be updated + */ + dma_addr_t *pages_addr; + void *kptr; }; /* Helper to disable partial resident texture feature from a fence callback */ @@ -972,10 +978,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, uint64_t flags) { unsigned int i; + uint64_t value; for (i = 0; i < count; i++) { + value = params->pages_addr ? + amdgpu_vm_map_gart(params->pages_addr, addr) : + addr; amdgpu_gart_set_pte_pde(params->adev, (void *)pe, - i, addr, flags); + i, value, flags); addr += incr; } @@ -1253,6 +1263,59 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, return entry->bo; } +/** + * amdgpu_vm_update_ptes_cpu - Update the page tables in the range + * start - @end using CPU. + * See amdgpu_vm_update_ptes for parameter description. 
+ * + */ +static int amdgpu_vm_update_ptes_cpu(struct amdgpu_pte_update_params *params, + uint64_t start, uint64_t end, + uint64_t dst, uint64_t flags) +{ + struct amdgpu_device *adev = params->adev; + const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; + void *pe_ptr; + uint64_t addr; + struct amdgpu_bo *pt; + unsigned int nptes; + int r; + + /* initialize the variables */ + addr = start; + + /* walk over the address space and update the page tables */ + while (addr < end) { + pt = amdgpu_vm_get_pt(params, addr); + if (!pt) { + pr_err("PT not found, aborting update_ptes\n"); + return -EINVAL; + } + + WARN_ON(params->shadow); + + r = amdgpu_bo_kmap(pt, &pe_ptr); + if (r) + return r; + + pe_ptr += (addr & mask) * 8; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); + + params->func(params, (uint64_t)pe_ptr, dst, nptes, + AMDGPU_GPU_PAGE_SIZE, flags); + + amdgpu_bo_kunmap(pt); + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; + } + + return 0; +} + /** * amdgpu_vm_update_ptes - make sure that page tables are valid * @@ -1277,6 +1340,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_bo *pt; unsigned nptes; + if (params->func == amdgpu_vm_cpu_set_ptes) + return amdgpu_vm_update_ptes_cpu(params, start, end, + dst, flags); + /* walk over the address space and update the page tables */ for (addr = start; addr < end; addr += nptes) { pt = amdgpu_vm_get_pt(params, addr); @@ -1418,6 +1485,25 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.vm = vm; params.src = src; + if (vm->use_cpu_for_update) { + /* params.src is used as flag to indicate system Memory */ + if (pages_addr) + params.src = ~0; + + /* Wait for PT BOs to be free. PTs share the same resv. object + * as the root PD BO + */ + r = amdgpu_vm_bo_wait(adev, vm->root.bo); + if (unlikely(r)) + return r; + + params.func = amdgpu_vm_cpu_set_ptes; + params.pages_addr = pages_addr; + params.shadow = false; + return amdgpu_vm_frag_ptes(¶ms, start, last + 1, + addr, flags); + } + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* sync to everything on unmapping */ -- cgit From 370f092f30ee6fa0be6eb14d2ddb66ef861c6a3f Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Fri, 9 Jun 2017 17:47:27 -0400 Subject: drm/amdgpu: vm_update_ptes remove code duplication MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CPU and GPU paths were mostly the same. Acked-by: Christian König Acked-by: Alex Deucher Signed-off-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 73 ++++++++-------------------------- 1 file changed, 16 insertions(+), 57 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c4f1a305c68c..c308047bfb13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1263,59 +1263,6 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, return entry->bo; } -/** - * amdgpu_vm_update_ptes_cpu - Update the page tables in the range - * start - @end using CPU. - * See amdgpu_vm_update_ptes for parameter description. 
- * - */ -static int amdgpu_vm_update_ptes_cpu(struct amdgpu_pte_update_params *params, - uint64_t start, uint64_t end, - uint64_t dst, uint64_t flags) -{ - struct amdgpu_device *adev = params->adev; - const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; - void *pe_ptr; - uint64_t addr; - struct amdgpu_bo *pt; - unsigned int nptes; - int r; - - /* initialize the variables */ - addr = start; - - /* walk over the address space and update the page tables */ - while (addr < end) { - pt = amdgpu_vm_get_pt(params, addr); - if (!pt) { - pr_err("PT not found, aborting update_ptes\n"); - return -EINVAL; - } - - WARN_ON(params->shadow); - - r = amdgpu_bo_kmap(pt, &pe_ptr); - if (r) - return r; - - pe_ptr += (addr & mask) * 8; - - if ((addr & ~mask) == (end & ~mask)) - nptes = end - addr; - else - nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); - - params->func(params, (uint64_t)pe_ptr, dst, nptes, - AMDGPU_GPU_PAGE_SIZE, flags); - - amdgpu_bo_kunmap(pt); - addr += nptes; - dst += nptes * AMDGPU_GPU_PAGE_SIZE; - } - - return 0; -} - /** * amdgpu_vm_update_ptes - make sure that page tables are valid * @@ -1339,10 +1286,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t addr, pe_start; struct amdgpu_bo *pt; unsigned nptes; + int r; + bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes); - if (params->func == amdgpu_vm_cpu_set_ptes) - return amdgpu_vm_update_ptes_cpu(params, start, end, - dst, flags); /* walk over the address space and update the page tables */ for (addr = start; addr < end; addr += nptes) { @@ -1353,6 +1299,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, } if (params->shadow) { + if (WARN_ONCE(use_cpu_update, + "CPU VM update doesn't suuport shadow pages")) + return 0; + if (!pt->shadow) return 0; pt = pt->shadow; @@ -1363,13 +1313,22 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); - pe_start = amdgpu_bo_gpu_offset(pt); + if (use_cpu_update) { + r = amdgpu_bo_kmap(pt, (void *)&pe_start); + if (r) + return r; + } else + pe_start = amdgpu_bo_gpu_offset(pt); + pe_start += (addr & mask) * 8; params->func(params, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); dst += nptes * AMDGPU_GPU_PAGE_SIZE; + + if (use_cpu_update) + amdgpu_bo_kunmap(pt); } return 0; -- cgit From a1924005a2e9bfcc4e217b4acd0a4f2421969040 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Fri, 9 Jun 2017 17:47:28 -0400 Subject: drm/amdgpu: Fix compiler warnings Acked-by: Alex Deucher Signed-off-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c308047bfb13..9743db515e0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -984,7 +984,7 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, value = params->pages_addr ? 
amdgpu_vm_map_gart(params->pages_addr, addr) : addr; - amdgpu_gart_set_pte_pde(params->adev, (void *)pe, + amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe, i, value, flags); addr += incr; } @@ -1023,11 +1023,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, unsigned level) { struct amdgpu_bo *shadow; - struct amdgpu_ring *ring; - uint64_t pd_addr, shadow_addr; + struct amdgpu_ring *ring = NULL; + uint64_t pd_addr, shadow_addr = 0; uint32_t incr = amdgpu_vm_bo_size(adev, level + 1); uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; - unsigned count = 0, pt_idx, ndw; + unsigned count = 0, pt_idx, ndw = 0; struct amdgpu_job *job; struct amdgpu_pte_update_params params; struct dma_fence *fence = NULL; -- cgit
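The vm_update_mode parameter introduced in the patches above packs two independent switches into one integer: BIT0 selects CPU updates for graphics VMs, BIT1 selects CPU updates for compute VMs, and -1 leaves the choice to the driver, which defaults to CPU updates for compute only on large-BAR systems. The following is a minimal user-space sketch of that decoding logic only; the macro names are simplified stand-ins for the AMDGPU_VM_USE_CPU_FOR_GFX/AMDGPU_VM_USE_CPU_FOR_COMPUTE defines added to amdgpu_vm.h, and resolve_update_mode() is a hypothetical helper, not driver code.

        /* Standalone illustration (not kernel code) of how the
         * amdgpu.vm_update_mode bits select CPU vs. SDMA page-table updates.
         */
        #include <stdbool.h>
        #include <stdio.h>

        #define USE_CPU_FOR_GFX     (1 << 0)  /* BIT0: graphics VMs updated by CPU */
        #define USE_CPU_FOR_COMPUTE (1 << 1)  /* BIT1: compute VMs updated by CPU  */

        /* -1 means "not set by the user", matching the module parameter default. */
        static int resolve_update_mode(int vm_update_mode, bool large_bar)
        {
                if (vm_update_mode != -1)
                        return vm_update_mode;
                /* Default: only large-BAR systems update compute VMs with the CPU. */
                return large_bar ? USE_CPU_FOR_COMPUTE : 0;
        }

        int main(void)
        {
                int mode = resolve_update_mode(-1, true); /* parameter unset, large BAR */

                printf("graphics VMs updated by: %s\n",
                       (mode & USE_CPU_FOR_GFX) ? "CPU" : "SDMA");
                printf("compute VMs updated by:  %s\n",
                       (mode & USE_CPU_FOR_COMPUTE) ? "CPU" : "SDMA");
                return 0;
        }

Per the MODULE_PARM_DESC text in the amdgpu_drv.c hunk, loading the driver with amdgpu.vm_update_mode=3 corresponds to both bits being set, i.e. CPU updates for graphics and compute VMs alike.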