author    Christian König <christian.koenig@amd.com>    2024-08-21 13:55:41 +0200
committer Alex Deucher <alexander.deucher@amd.com>      2024-09-25 12:55:44 -0400
commit    126be9b2bef9c7068fdd464790d82e6d70f9d8e6 (patch)
tree      bec6389afde2af3dfe1237aa1079582fdddf79d8 /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
parent    4771d2ecb7b9e4c2c73ede2908d7e7c989460981 (diff)
drm/amdgpu: sync to KFD fences before clearing PTEs
This patch addresses the basic problem that we also need to sync to the KFD fences of the BO, because otherwise it can happen that we clear PTEs while the KFD queues are still running.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 30
1 file changed, 30 insertions(+), 0 deletions(-)
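To illustrate the intended use, here is a minimal caller sketch. It is not part of this patch: the function name, the vm->root.bo->tbo.base.resv field path, and the surrounding flow are assumptions for illustration; the idea is that the PTE-clearing path collects the KFD fences of the page-directory BO via the new amdgpu_sync_kfd() helper before the clearing job is submitted.

/*
 * Hypothetical caller sketch (not part of this patch): gather the KFD
 * fences of the VM root page-directory BO into a sync object so a
 * subsequent PTE-clearing job waits for still-running KFD queues.
 * The field path vm->root.bo->tbo.base.resv and the function name are
 * assumptions made for this example.
 */
static int example_sync_kfd_before_clear(struct amdgpu_vm *vm,
					 struct amdgpu_sync *sync)
{
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;

	/* Add only the fences owned by the KFD to the sync object. */
	return amdgpu_sync_kfd(sync, resv);
}

The new helper itself (shown in the diff below) iterates the reservation object with DMA_RESV_USAGE_BOOKKEEP and filters on the fence owner, so callers only pick up KFD-owned fences rather than waiting on every fence attached to the BO.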
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index bdf1ef825d89..c586ab4c911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -260,6 +260,36 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
+/**
+ * amdgpu_sync_kfd - sync to KFD fences
+ *
+ * @sync: sync object to add KFD fences to
+ * @resv: reservation object with KFD fences
+ *
+ * Extract all KFD fences and add them to the sync object.
+ */
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *f;
+ int r = 0;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
+ void *fence_owner = amdgpu_sync_get_owner(f);
+
+ if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
+ continue;
+
+ r = amdgpu_sync_fence(sync, f);
+ if (r)
+ break;
+ }
+ dma_resv_iter_end(&cursor);
+
+ return r;
+}
+
/* Free the entry back to the slab */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{