author    Jack Xiao <Jack.Xiao@amd.com>    2019-06-20 10:17:31 -0500
committer Alex Deucher <alexander.deucher@amd.com>    2019-06-21 18:58:21 -0500
commit    6698a3d05fda57f37add68c55a0696bfa7100413 (patch)
tree      291c9012b429224f46c466c8a111246740f89a96 /drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
parent    43974dacb6c35ea6a2bf89c1d5e2e04195464f81 (diff)
drm/amdgpu: add mcbp unit test in debugfs (v3)
The MCBP (Mid-Command Buffer Preemption) unit test is used to test the
functionality of MCBP: it emulates sending a preemption request to a ring
and then resubmits the unfinished jobs.

v2: squash in fixes (Alex)
v3: squash in memory leak fix (Jack)

Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
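For reference, a minimal user-space sketch of how the new debugfs file could
be driven. The debugfs mount point, the DRI minor number, and the ring index
below are assumptions for illustration, not part of this patch:

/* hypothetical test driver: write a ring index to the new debugfs file */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumed path: debugfs mounted at /sys/kernel/debug, card is DRI minor 0 */
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_preempt_ib";
	const char *ring = "0\n";	/* ring index to preempt, as a decimal string */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the write triggers amdgpu_debugfs_ib_preempt() in the kernel */
	if (write(fd, ring, strlen(ring)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Writing a ring index to the file invokes amdgpu_debugfs_ib_preempt() through
the simple-attribute fops defined at the end of the patch.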
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 158 ++++++++++++++++++
 1 file changed, 158 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8930d66f2204..8339f7a47cb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -920,17 +920,175 @@ static const struct drm_info_list amdgpu_debugfs_list[] = {
{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};
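+/*
+ * Detach every not-yet-signalled fence (from last_seq up to sync_seq)
+ * from the fence driver's ring buffer and stash it in @fences, so the
+ * preempted jobs can be resubmitted and the old fences signalled by
+ * hand afterwards.
+ */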
+static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
+ struct dma_fence **fences)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ uint32_t sync_seq, last_seq;
+
+ last_seq = atomic_read(&ring->fence_drv.last_seq);
+ sync_seq = ring->fence_drv.sync_seq;
+
+ last_seq &= drv->num_fences_mask;
+ sync_seq &= drv->num_fences_mask;
+
+ do {
+ struct dma_fence *fence, **ptr;
+
+ ++last_seq;
+ last_seq &= drv->num_fences_mask;
+ ptr = &drv->fences[last_seq];
+
+ fence = rcu_dereference_protected(*ptr, 1);
+ RCU_INIT_POINTER(*ptr, NULL);
+
+ if (!fence)
+ continue;
+
+ fences[last_seq] = fence;
+
+ } while (last_seq != sync_seq);
+}
+
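+/* Signal the stashed fences and drop the references held on them. */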
+static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
+ int length)
+{
+ int i;
+ struct dma_fence *fence;
+
+ for (i = 0; i < length; i++) {
+ fence = fences[i];
+ if (!fence)
+ continue;
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
+ }
+}
+
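+/* Resubmit every job still on the scheduler's mirror list. */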
+static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct dma_fence *fence;
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ fence = sched->ops->run_job(s_job);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&sched->job_list_lock);
+}
+
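+/*
+ * Debugfs entry point: @val selects the ring to preempt.  Parks the
+ * scheduler thread, preempts the ring, resubmits the unfinished jobs
+ * and signals the original fences once they complete.
+ */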
+static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
+{
+ int r, resched, length;
+ struct amdgpu_ring *ring;
+ struct drm_sched_job *s_job;
+ struct amdgpu_job *job;
+ struct dma_fence **fences = NULL;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (val >= AMDGPU_MAX_RINGS)
+ return -EINVAL;
+
+ ring = adev->rings[val];
+
+ if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+ return -EINVAL;
+
+	/* the previous preemption request has not completed yet */
+ if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
+ return -EBUSY;
+
+ length = ring->fence_drv.num_fences_mask + 1;
+ fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ /* stop the scheduler */
+ kthread_park(ring->sched.thread);
+
+ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+
+ /* preempt the IB */
+ r = amdgpu_ring_preempt_ib(ring);
+ if (r) {
+ DRM_WARN("failed to preempt ring %d\n", ring->idx);
+ goto failure;
+ }
+
+ amdgpu_fence_process(ring);
+
+ if (atomic_read(&ring->fence_drv.last_seq) !=
+ ring->fence_drv.sync_seq) {
+ DRM_INFO("ring %d was preempted\n", ring->idx);
+
+ /* swap out the old fences */
+ amdgpu_ib_preempt_fences_swap(ring, fences);
+
+ amdgpu_fence_driver_force_completion(ring);
+
+ s_job = list_first_entry_or_null(
+ &ring->sched.ring_mirror_list,
+ struct drm_sched_job, node);
+ if (s_job) {
+ job = to_amdgpu_job(s_job);
+ /* mark the job as preempted */
+ /* job->preemption_status |=
+ AMDGPU_IB_PREEMPTED; */
+ }
+
+ /* resubmit unfinished jobs */
+ amdgpu_ib_preempt_job_recovery(&ring->sched);
+
+	/* wait for the resubmitted jobs to finish */
+ amdgpu_fence_wait_empty(ring);
+
+ /* signal the old fences */
+ amdgpu_ib_preempt_signal_fences(fences, length);
+ }
+
+failure:
+ /* restart the scheduler */
+ kthread_unpark(ring->sched.thread);
+
+ ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+
+	kfree(fences);
+
+ return 0;
+}
+
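+/* Write-only attribute: the value written selects the ring to preempt. */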
+DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
+ amdgpu_debugfs_ib_preempt, "%llu\n");
+
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ adev->debugfs_preempt =
+ debugfs_create_file("amdgpu_preempt_ib", 0600,
+ adev->ddev->primary->debugfs_root,
+ (void *)adev, &fops_ib_preempt);
+	if (!adev->debugfs_preempt) {
+		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
+ return -EIO;
+ }
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
+void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
+{
+	debugfs_remove(adev->debugfs_preempt);
+}
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;