author     Dave Airlie <[email protected]>   2020-07-23 15:38:10 +1000
committer  Dave Airlie <[email protected]>   2020-07-23 15:38:11 +1000
commit     206739119508d5ab4b42ab480ff61a7e6cd72d7c (patch)
tree       756285714f5b842e223e22a75d17521c9f41ae55 /drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
parent     959ed53808d171cf5203cdc74578db55d0c79822 (diff)
parent     6e14adea0ac3037d923a9591d1a094c115d7947c (diff)
Merge tag 'amd-drm-next-5.9-2020-07-17' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.9-2020-07-17:

amdgpu:
- SI UVD/VCE clock support
- Updates for Sienna Cichlid
- Expose drm rotation property
- Atomfirmware updates for renoir
- Updates to GPUVM hub handling for different register layouts
- swSMU restructuring and cleanups
- RAS fixes
- DC fixes
- Mode1 reset support for Sienna Cichlid
- Add support for Navy Flounder GPUs

amdkfd:
- Add SMI events watch interface

UAPI:
- Add amdkfd SMI events watch interface
  Userspace which uses this interface:
  https://github.com/RadeonOpenCompute/rocm_smi_lib/commit/2235ede34c456f1c7d3490f6fe74825d442d272e

Signed-off-by: Dave Airlie <[email protected]>
From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index aeada7c9fbea..a3fa1560de96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1343,27 +1343,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 {
 	struct amdgpu_job *job;
-	struct drm_sched_job *s_job;
+	struct drm_sched_job *s_job, *tmp;
 	uint32_t preempt_seq;
 	struct dma_fence *fence, **ptr;
 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
 	struct drm_gpu_scheduler *sched = &ring->sched;
+	bool preempted = true;
 
 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
 		return;
 
 	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
-	if (preempt_seq <= atomic_read(&drv->last_seq))
-		return;
+	if (preempt_seq <= atomic_read(&drv->last_seq)) {
+		preempted = false;
+		goto no_preempt;
+	}
 
 	preempt_seq &= drv->num_fences_mask;
 	ptr = &drv->fences[preempt_seq];
 	fence = rcu_dereference_protected(*ptr, 1);
 
+no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
+			/* remove job from ring_mirror_list */
+			list_del_init(&s_job->node);
+			sched->ops->free_job(s_job);
+			continue;
+		}
 		job = to_amdgpu_job(s_job);
-		if (job->fence == fence)
+		if (preempted && job->fence == fence)
 			/* mark the job as preempted */
 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
 	}
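
The interesting part of this hunk is the loop rewrite: because the walk over sched->ring_mirror_list now frees already-signaled jobs as it goes, it has to use list_for_each_entry_safe(), which stashes the next node in tmp before the body runs so that list_del_init() plus free_job() on the current entry cannot break the traversal. Below is a minimal, self-contained userspace sketch of the same remove-while-iterating idea; the toy_job structure, its signaled flag and reap_signaled() are invented for illustration only and are not part of the kernel change.

#include <stdio.h>
#include <stdlib.h>

/* Toy job list; stands in for the scheduler's ring_mirror_list. */
struct toy_job {
	int seq;
	int signaled;                 /* stands in for dma_fence_is_signaled() */
	struct toy_job *next;
};

/*
 * Walk the list and free entries that already completed.  Caching the
 * successor *before* freeing the current node is the same idea as
 * list_for_each_entry_safe(): the iterator is never re-read from memory
 * the loop body just released.
 */
static void reap_signaled(struct toy_job **head)
{
	struct toy_job **link = head;
	struct toy_job *job, *tmp;

	for (job = *head; job; job = tmp) {
		tmp = job->next;              /* save successor first */
		if (job->signaled) {
			*link = tmp;          /* unlink, like list_del_init() */
			free(job);            /* like sched->ops->free_job() */
			continue;
		}
		link = &job->next;
	}
}

int main(void)
{
	struct toy_job *head = NULL, *j;

	for (int i = 3; i >= 1; i--) {
		j = calloc(1, sizeof(*j));
		j->seq = i;
		j->signaled = (i == 2);       /* pretend job 2 already finished */
		j->next = head;
		head = j;
	}
	reap_signaled(&head);
	for (j = head; j; j = j->next)
		printf("job %d still pending\n", j->seq);
	return 0;
}

The rest of the hunk (the preempted flag and the no_preempt label) simply skips the fence comparison when the read-back preemption sequence shows nothing was actually preempted, while still letting the loop reap jobs that have already finished.
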
@@ -1461,10 +1471,12 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
 	}
 
 	if (is_support_sw_smu(adev)) {
-		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
 		if (ret || val > max_freq || val < min_freq)
 			return -EINVAL;
-		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
+	} else {
+		return 0;
 	}
 
 	pm_runtime_mark_last_busy(adev->ddev->dev);
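
The second hunk follows the swSMU restructuring called out in the tag: smu_get_dpm_freq_range() and smu_set_soft_freq_range() drop their trailing bool argument, and the handler now returns early when software SMU is not in use instead of falling through. The acceptance check itself is unchanged: a value written to the debugfs node must lie inside the DPM range reported by the SMU, and on success the soft range is pinned so that min == max == val. A rough, illustration-only restatement of that check follows; struct fake_smu and fake_sclk_set() are invented names, not amdgpu API.

#include <stdint.h>
#include <errno.h>

/*
 * Illustrative restatement of the validate/pin step in
 * amdgpu_debugfs_sclk_set().  The real code queries &adev->smu via
 * smu_get_dpm_freq_range() and applies the result with
 * smu_set_soft_freq_range(); the fields below are stand-ins.
 */
struct fake_smu {
	uint32_t hw_min_mhz, hw_max_mhz;      /* reported DPM range */
	uint32_t soft_min_mhz, soft_max_mhz;  /* requested soft range */
};

static int fake_sclk_set(struct fake_smu *smu, uint64_t val)
{
	uint32_t min_freq = smu->hw_min_mhz;
	uint32_t max_freq = smu->hw_max_mhz;

	if (val > max_freq || val < min_freq)  /* outside the DPM window */
		return -EINVAL;

	smu->soft_min_mhz = (uint32_t)val;     /* pin: min == max == val */
	smu->soft_max_mhz = (uint32_t)val;
	return 0;
}

Pinning min and max to the same value is how the debugfs write locks the GFX clock at the requested frequency rather than merely capping it.
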