author	Dave Airlie <airlied@redhat.com>	2018-08-08 06:09:08 +1000
committer	Dave Airlie <airlied@redhat.com>	2018-08-08 06:22:23 +1000
commit	940fbcb73fd25b517fa10c5a9cc96ca0ce1a2fc4 (patch)
tree	c967fae5501fefe9258f9891371977833bd2a72c /drivers/gpu/drm/scheduler/gpu_scheduler.c
parent	569f0a8694d0ff13c5d296a594c7d8cec8d6f35f (diff)
parent	df36b2fb8390d98453fff1aae3927095fe9ff36c (diff)
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
Fixes for 4.19:
- Fix UVD 7.2 instance handling
- Fix UVD 7.2 harvesting
- GPU scheduler fix for when a process is killed
- TTM cleanups
- amdgpu CS bo_list fixes
- Powerplay fixes for polaris12 and CZ/ST
- DC fixes for link training certain HMDs
- DC fix for vega10 blank screen in certain cases

From: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180801222906.1016-1-alexander.deucher@amd.com
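Of the items above, the GPU-scheduler fix for a killed process is the one that touches this file: drm_sched_entity_push_job() now records which thread group last queued work in entity->last_user, and drm_sched_entity_flush() only removes the entity from its run queue when the task dying on SIGKILL is (or nobody is) that last user. Below is a minimal userspace sketch of that compare-and-swap handshake; the fake_* names and C11 atomics are hypothetical stand-ins for the kernel's WRITE_ONCE()/cmpxchg() on entity->last_user, not kernel API.

/*
 * Userspace model (not kernel code) of the "last user" handshake the
 * patch introduces in drm_sched_entity_push_job()/drm_sched_entity_flush().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_entity {
	_Atomic(void *) last_user;	/* models entity->last_user */
	int on_runqueue;		/* models membership on entity->rq */
};

/* models drm_sched_entity_push_job(): remember the submitting thread group */
static void fake_push_job(struct fake_entity *e, void *task)
{
	atomic_store(&e->last_user, task);	/* kernel: WRITE_ONCE(entity->last_user, current->group_leader) */
}

/* models the SIGKILL branch of drm_sched_entity_flush() */
static void fake_flush_on_kill(struct fake_entity *e, void *task)
{
	void *expected = task;

	/* kernel: last_user = cmpxchg(&entity->last_user, current->group_leader, NULL) */
	bool was_last = atomic_compare_exchange_strong(&e->last_user, &expected, NULL);

	/* only the last submitter (or nobody at all) may pull the entity off its rq */
	if (was_last || expected == NULL)
		e->on_runqueue = 0;	/* kernel: drm_sched_rq_remove_entity(entity->rq, entity) */
}

int main(void)
{
	struct fake_entity e = { .on_runqueue = 1 };
	int task_a, task_b;	/* addresses stand in for task_struct pointers */

	fake_push_job(&e, &task_a);

	fake_flush_on_kill(&e, &task_b);	/* some other process dies: entity stays queued */
	printf("after unrelated exit: on_runqueue=%d\n", e.on_runqueue);

	fake_flush_on_kill(&e, &task_a);	/* the last submitter dies: entity is removed */
	printf("after last-user exit: on_runqueue=%d\n", e.on_runqueue);
	return 0;
}

Only when the exiting thread group matches the recorded last submitter (or no submitter is recorded at all) does the flush remove the entity, so an entity still being fed by another process survives one user's SIGKILL.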
Diffstat (limited to 'drivers/gpu/drm/scheduler/gpu_scheduler.c')
-rw-r--r--	drivers/gpu/drm/scheduler/gpu_scheduler.c | 41
1 file changed, 13 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 3f2fc5e8242a..1b733229201e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -199,21 +199,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
- * drm_sched_entity_is_initialized - Query if entity is initialized
- *
- * @sched: Pointer to scheduler instance
- * @entity: The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
-{
- return entity->rq != NULL &&
- entity->rq->sched == sched;
-}
-
-/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
@@ -224,7 +209,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
- if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
+ if (list_empty(&entity->list) ||
+ spsc_queue_peek(&entity->job_queue) == NULL)
return true;
return false;
@@ -275,11 +261,10 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
struct drm_gpu_scheduler *sched;
+ struct task_struct *last_user;
long ret = timeout;
sched = entity->rq->sched;
- if (!drm_sched_entity_is_initialized(sched, entity))
- return ret;
/**
* The client will not queue more IBs during this fini, consume existing
* queued IBs or discard them on SIGKILL
@@ -295,8 +280,10 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
/* For killed process disable any more IBs enqueue right now */
- if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
- drm_sched_entity_set_rq(entity, NULL);
+ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+ if ((!last_user || last_user == current->group_leader) &&
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_rq_remove_entity(entity->rq, entity);
return ret;
}
@@ -317,7 +304,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
struct drm_gpu_scheduler *sched;
sched = entity->rq->sched;
- drm_sched_entity_set_rq(entity, NULL);
+ drm_sched_rq_remove_entity(entity->rq, entity);
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
@@ -413,15 +400,12 @@ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
if (entity->rq == rq)
return;
- spin_lock(&entity->rq_lock);
-
- if (entity->rq)
- drm_sched_rq_remove_entity(entity->rq, entity);
+ BUG_ON(!rq);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
- if (rq)
- drm_sched_rq_add_entity(rq, entity);
-
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);
@@ -541,6 +525,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
trace_drm_sched_job(sched_job, entity);
+ WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
/* first job wakes up scheduler */
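A note on the design the hunks above converge on: entity->rq is no longer allowed to become NULL (drm_sched_entity_set_rq() now does BUG_ON(!rq)), so an entity that should stop being scheduled is simply unlinked from its run queue with drm_sched_rq_remove_entity(), and drm_sched_entity_is_idle() correspondingly tests list_empty(&entity->list) instead of entity->rq. The toy_* sketch below is a hypothetical userspace model of that invariant, not kernel API.

/* Toy model of the new invariant: an entity always keeps a valid rq
 * pointer; "not scheduled" is expressed by unlinking it from the rq's
 * entity list rather than clearing entity->rq. */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq { int id; };

struct toy_entity {
	struct toy_rq *rq;	/* never NULL after init (BUG_ON(!rq)) */
	bool on_list;		/* models !list_empty(&entity->list) */
	int queued_jobs;	/* models spsc_queue_peek(&entity->job_queue) */
};

/* models drm_sched_entity_is_idle() after the patch */
static bool toy_entity_is_idle(const struct toy_entity *e)
{
	return !e->on_list || e->queued_jobs == 0;
}

/* models drm_sched_rq_remove_entity() as used by the flush/fini teardown */
static void toy_rq_remove_entity(struct toy_entity *e)
{
	e->on_list = false;	/* the rq pointer itself stays valid */
}

int main(void)
{
	struct toy_rq rq = { .id = 0 };
	struct toy_entity e = { .rq = &rq, .on_list = true, .queued_jobs = 1 };

	printf("idle before teardown: %d\n", toy_entity_is_idle(&e));
	toy_rq_remove_entity(&e);
	printf("idle after teardown:  %d\n", toy_entity_is_idle(&e));
	/* e.rq still points at rq, just as drm_sched_entity_fini() can
	 * still dereference entity->rq->sched after the removal */
	return 0;
}

Keeping entity->rq valid through teardown is what lets drm_sched_entity_fini() still reach entity->rq->sched after the entity has been pulled off the run queue.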