Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 78
1 file changed, 35 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index faf0c242874e..c8cd9bfa3eeb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
- uint32_t fence)
-{
- struct msm_gem_submit *submit;
- unsigned long flags;
-
- spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node) {
- if (fence_after(submit->seqno, fence))
- break;
-
- msm_update_fence(submit->ring->fctx,
- submit->hw_fence->seqno);
- dma_fence_signal(submit->hw_fence);
- }
- spin_unlock_irqrestore(&ring->submit_lock, flags);
-}
-
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
@@ -351,6 +333,28 @@ find_submit(struct msm_ringbuffer *ring, uint32_t fence)
static void retire_submits(struct msm_gpu *gpu);
+static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
+{
+ struct msm_file_private *ctx = submit->queue->ctx;
+ struct task_struct *task;
+
+ /* Note that kstrdup will return NULL if argument is NULL: */
+ *comm = kstrdup(ctx->comm, GFP_KERNEL);
+ *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ return;
+
+ if (!*comm)
+ *comm = kstrdup(task->comm, GFP_KERNEL);
+
+ if (!*cmd)
+ *cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+
+ put_task_struct(task);
+}
+
static void recover_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
@@ -367,18 +371,12 @@ static void recover_worker(struct kthread_work *work)
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
- struct task_struct *task;
-
/* Increment the fault counts */
submit->queue->faults++;
- submit->aspace->faults++;
+ if (submit->aspace)
+ submit->aspace->faults++;
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
if (comm && cmd) {
DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
@@ -420,9 +418,9 @@ static void recover_worker(struct kthread_work *work)
* one more to clear the faulting submit
*/
if (ring == cur_ring)
- fence++;
+ ring->memptrs->fence = ++fence;
- update_fences(gpu, ring, fence);
+ msm_update_fence(ring->fctx, fence);
}
if (msm_gpu_active(gpu)) {
@@ -467,14 +465,7 @@ static void fault_worker(struct kthread_work *work)
goto resume_smmu;
if (submit) {
- struct task_struct *task;
-
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
/*
* When we get GPU iova faults, we can get 1000s of them,
@@ -515,7 +506,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence_before(fence, ring->seqno)) {
+ } else if (fence_before(fence, ring->fctx->last_fence)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -523,13 +514,13 @@ static void hangcheck_handler(struct timer_list *t)
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, ring->seqno);
+ gpu->name, ring->fctx->last_fence);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (fence_after(ring->seqno, ring->hangcheck_fence))
+ if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -663,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
msm_submit_retire(submit);
pm_runtime_mark_last_busy(&gpu->pdev->dev);
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
spin_lock_irqsave(&ring->submit_lock, flags);
list_del(&submit->node);
@@ -677,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
msm_devfreq_idle(gpu);
mutex_unlock(&gpu->active_lock);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
msm_gem_submit_put(submit);
}
@@ -726,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
int i;
for (i = 0; i < gpu->nr_rings; i++)
- update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
+ msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
kthread_queue_work(gpu->worker, &gpu->retire_work);
update_sw_cntrs(gpu);
@@ -746,7 +738,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
msm_gpu_hw_init(gpu);
- submit->seqno = ++ring->seqno;
+ submit->seqno = submit->hw_fence->seqno;
msm_rd_dump_submit(priv->rd, submit, NULL);