author		Tejun Heo <tj@kernel.org>	2015-12-07 10:09:03 -0500
committer	Tejun Heo <tj@kernel.org>	2015-12-07 10:09:03 -0500
commit		0b98f0c04245877ae0b625a7f0aa55b8ff98e0c4 (patch)
tree		486ebe0d76217a4f7781e28fbd96facb0b66f9da /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent		67cde9c4938945b9510730c64e68d2f1dd7bc0aa (diff)
parent		527e9316f8ec44bd53d90fb9f611fa7ffff52bb9 (diff)
Merge branch 'master' into for-4.4-fixes
The following commit, which went into mainline through the networking
tree,

  3b13758f51de ("cgroups: Allow dynamically changing net_classid")

conflicts in net/core/netclassid_cgroup.c with the following pending
fix in cgroup/for-4.4-fixes:

  1f7dd3e5a6e4 ("cgroup: fix handling of multi-destination migration from subtree_control enabling")
The former separates update_classid() out of cgrp_attach() and updates
it to walk all fds of all tasks in the target css, so that it can be
used from both the migration and the config-change paths. The latter
drops the @css argument from cgrp_attach().
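
For reference, the fd walk introduced by 3b13758f51de has roughly the
shape below. This is a sketch against the v4.4-era css_task_iter and
iterate_fd APIs, not necessarily the verbatim mainline code;
update_classid_sock() is the per-fd helper added by the same commit.

static void update_classid(struct cgroup_subsys_state *css, void *v)
{
	struct css_task_iter it;
	struct task_struct *p;

	/* visit every task that currently belongs to @css ... */
	css_task_iter_start(css, &it);
	while ((p = css_task_iter_next(&it))) {
		task_lock(p);
		/*
		 * ... and retag each of its open socket fds with the
		 * classid carried in @v
		 */
		iterate_fd(p->files, 0, update_classid_sock, v);
		task_unlock(p);
	}
	css_task_iter_end(&it);
}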
Resolve the conflict by making cgrp_attach() call update_classid()
with the css of the first task in @tset. Walking the whole @tset in
cgrp_attach() could be revived, but net_cls is v1-only, and in v1
there is always exactly one target css during migration, so using the
first task's css is fine.
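
Concretely, the resolved cgrp_attach() in net/core/netclassid_cgroup.c
comes out roughly as follows, a sketch assuming the
post-1f7dd3e5a6e4 signature, where cgroup_taskset_first(tset, &css)
also hands back the destination css:

static void cgrp_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;

	/*
	 * net_cls is cgroup v1 only, so every task in @tset migrates
	 * to the same css; the first task's css is the target.
	 */
	cgroup_taskset_first(tset, &css);
	update_classid(css,
		       (void *)(unsigned long)css_cls_state(css)->classid);
}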
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Nina Schiff <ninasc@fb.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c	43
1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 087332858853..f6ea4b43a60c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
-	if (r) {
-		mutex_unlock(&vm->mutex);
+	if (r)
 		return r;
-	}
 
 	bo_va = amdgpu_vm_bo_find(vm, rbo);
 	if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
-		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	    AMDGPU_GEM_USERPTR_REGISTER))
 		return -EINVAL;
 
-	if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
-	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
+	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
+	    !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
+	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
 
 		/* if we want to write to it we must require anonymous
 		   memory and install a MMU notifier */
@@ -483,6 +477,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
@@ -512,6 +509,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *rbo;
 	struct amdgpu_bo_va *bo_va;
+	struct ttm_validate_buffer tv, tv_pd;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -547,19 +547,28 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
-	r = amdgpu_bo_reserve(rbo, false);
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	tv.bo = &rbo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	if (args->operation == AMDGPU_VA_OP_MAP) {
+		tv_pd.bo = &fpriv->vm.page_directory->tbo;
+		tv_pd.shared = true;
+		list_add(&tv_pd.head, &list);
+	}
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
-		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		amdgpu_bo_unreserve(rbo);
-		mutex_unlock(&fpriv->vm.mutex);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		drm_gem_object_unreference_unlocked(gobj);
 		return -ENOENT;
 	}
 
@@ -581,10 +590,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-
+	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-	mutex_unlock(&fpriv->vm.mutex);
 
+	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }