author     Dave Airlie <airlied@redhat.com>   2016-03-17 08:25:04 +1000
committer  Dave Airlie <airlied@redhat.com>   2016-03-17 08:25:04 +1000
commit     9f443bf53b5699835e0132d62d1e6c99a1eaeee8 (patch)
tree       482b1f57019446cc866a0fc8e87bd4b0b0119775 /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
parent     70a09f36d02584fe0025fa14a5cbf276240b2fd4 (diff)
parent     00b7c4ff7d482d287a591f047e0963d494569b46 (diff)
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes and cleanups for 4.6:
- DCE code cleanups
- HDP flush/invalidation fixes
- GPUVM fixes
- switch to drm_vblank_[on|off]
- PX fixes
- misc bug fixes

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux: (50 commits)
  drm/amdgpu: split pipeline sync out of SDMA vm_flush() as well
  drm/amdgpu: Revert "add mutex for ba_va->valids/invalids"
  drm/amdgpu: Revert "add lock for interval tree in vm"
  drm/amdgpu: Revert "add spin lock to protect freed list in vm (v3)"
  drm/amdgpu: reserve the PD during unmap and remove
  drm/amdgpu: Fix two bugs in amdgpu_vm_bo_split_mapping
  drm/radeon: Don't drop DP 2.7 Ghz link setup on some cards.
  MAINTAINERS: update radeon entry to include amdgpu as well
  drm/amdgpu: disable runtime pm on PX laptops without dGPU power control
  drm/radeon: disable runtime pm on PX laptops without dGPU power control
  drm/amd/amdgpu: Fix indentation in do_set_base() (DCEv8)
  drm/amd/amdgpu: make afmt_init cleanup if alloc fails (DCEv8)
  drm/amd/amdgpu: Move config init flag to bottom of sw_init (DCEv8)
  drm/amd/amdgpu: Don't proceed into audio_fini if audio is disabled (DCEv8)
  drm/amd/amdgpu: Fix identation in do_set_base() (DCEv10)
  drm/amd/amdgpu: Make afmt_init cleanup if alloc fails (DCEv10)
  drm/amd/amdgpu: Move initialized flag to bottom of sw_init (DCEv10)
  drm/amd/amdgpu: Don't proceed in audio_fini if disabled (DCEv10)
  drm/amd/amdgpu: Fix indentation in dce_v11_0_crtc_do_set_base()
  drm/amd/amdgpu: Make afmt_init() cleanup if alloc fails (DCEv11)
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 88
1 file changed, 61 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c15be00de904..c48b4fce5e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
 	struct fence		*fence;
 };
 
+static struct kmem_cache *amdgpu_sync_slab;
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -50,14 +52,18 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 	sync->last_vm_update = NULL;
 }
 
+/**
+ * amdgpu_sync_same_dev - test if fence belongs to us
+ *
+ * @adev: amdgpu device to use for the test
+ * @f: fence to test
+ *
+ * Test if the fence was issued by us.
+ */
 static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 {
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
-	if (a_fence)
-		return a_fence->ring->adev == adev;
-
 	if (s_fence) {
 		struct amdgpu_ring *ring;
 
@@ -68,17 +74,31 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 	return false;
 }
 
-static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+/**
+ * amdgpu_sync_get_owner - extract the owner of a fence
+ *
+ * @f: fence to get the owner from
+ *
+ * Extract who originally created the fence.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
 {
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
 	if (s_fence)
-		return s_fence->owner == owner;
-	if (a_fence)
-		return a_fence->owner == owner;
-	return false;
+		return s_fence->owner;
+
+	return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
+/**
+ * amdgpu_sync_keep_later - Keep the later fence
+ *
+ * @keep: existing fence to test
+ * @fence: new fence
+ *
+ * Either keep the existing fence or the new one, depending on which one is later.
+ */
 static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
 {
 	if (*keep && fence_is_later(*keep, fence))
@@ -104,7 +124,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return 0;
 
 	if (amdgpu_sync_same_dev(adev, f) &&
-	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
+	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
 		amdgpu_sync_keep_later(&sync->last_vm_update, f);
 
 	hash_for_each_possible(sync->fences, e, node, f->context) {
@@ -115,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		return 0;
 	}
 
-	e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
 	if (!e)
 		return -ENOMEM;
 
@@ -124,18 +144,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	return 0;
 }
 
-static void *amdgpu_sync_get_owner(struct fence *f)
-{
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
-	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
-
-	if (s_fence)
-		return s_fence->owner;
-	else if (a_fence)
-		return a_fence->owner;
-	return AMDGPU_FENCE_OWNER_UNDEFINED;
-}
-
 /**
  * amdgpu_sync_resv - sync to a reservation object
  *
@@ -208,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 		f = e->fence;
 
 		hash_del(&e->node);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 
 		if (!fence_is_signaled(f))
 			return f;
@@ -231,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
 
 		hash_del(&e->node);
 		fence_put(e->fence);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
 	return 0;
@@ -253,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
 		hash_del(&e->node);
 		fence_put(e->fence);
-		kfree(e);
+		kmem_cache_free(amdgpu_sync_slab, e);
 	}
 
 	fence_put(sync->last_vm_update);
 }
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Allocate the slab cache for sync entries.
+ */
+int amdgpu_sync_init(void)
+{
+	amdgpu_sync_slab = kmem_cache_create(
+		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!amdgpu_sync_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Free the slab cache for sync entries.
+ */
+void amdgpu_sync_fini(void)
+{
+	kmem_cache_destroy(amdgpu_sync_slab);
+}
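
For context: the new amdgpu_sync_init()/amdgpu_sync_fini() pair is meant to be called exactly once at driver load and unload, bracketing every user of the slab. Below is a minimal sketch of that wiring; the amdgpu_module_init()/amdgpu_module_exit() entry points and the header include are illustrative assumptions, not part of this diff.

/* Sketch only: hypothetical module entry points showing the intended
 * call order for amdgpu_sync_init()/amdgpu_sync_fini(). */
#include <linux/module.h>
#include "amdgpu.h"	/* assumed to declare amdgpu_sync_init/fini */

static int __init amdgpu_module_init(void)
{
	int r;

	/* Create the sync-entry slab before any amdgpu_sync object can
	 * call kmem_cache_alloc() on it. */
	r = amdgpu_sync_init();
	if (r)
		return r;

	/* ... register the PCI driver, etc. ... */
	return 0;
}

static void __exit amdgpu_module_exit(void)
{
	/* ... unregister the PCI driver first, so every entry has been
	 * returned via kmem_cache_free() before the cache is destroyed. */
	amdgpu_sync_fini();
}

module_init(amdgpu_module_init);
module_exit(amdgpu_module_exit);
MODULE_LICENSE("GPL and additional rights");

The switch from kmalloc()/kfree() to a dedicated kmem_cache is the usual pattern for small, fixed-size objects allocated and freed at high frequency: the cache avoids size-class rounding waste, and SLAB_HWCACHE_ALIGN keeps each amdgpu_sync_entry aligned to a hardware cache line.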