Diffstat (limited to 'drivers/gpu/drm/scheduler')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c   215
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c     229
2 files changed, 249 insertions, 195 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 6b25b2f4f5a3..fe09e5be79bd 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -73,6 +73,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->priority = priority;
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
entity->last_scheduled = NULL;
+ RB_CLEAR_NODE(&entity->rb_tree_node);
if(num_sched_list)
entity->rq = &sched_list[0]->sched_rq[entity->priority];
@@ -139,6 +140,73 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
return true;
}
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
+{
+ struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+
+ drm_sched_fence_finished(job->s_fence);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+}
+
+/* Signal the scheduler finished fence when the entity in question is killed. */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+ finish_cb);
+ int r;
+
+ dma_fence_put(f);
+
+ /* Wait for all dependencies to avoid data corruptions */
+ while (!xa_empty(&job->dependencies)) {
+ f = xa_erase(&job->dependencies, job->last_dependency++);
+ r = dma_fence_add_callback(f, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb);
+ if (!r)
+ return;
+
+ dma_fence_put(f);
+ }
+
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
+}
+
+/* Remove the entity from the scheduler and kill all pending jobs */
+static void drm_sched_entity_kill(struct drm_sched_entity *entity)
+{
+ struct drm_sched_job *job;
+ struct dma_fence *prev;
+
+ if (!entity->rq)
+ return;
+
+ spin_lock(&entity->rq_lock);
+ entity->stopped = true;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
+
+ /* Make sure this entity is not used by the scheduler at the moment */
+ wait_for_completion(&entity->entity_idle);
+
+ prev = dma_fence_get(entity->last_scheduled);
+ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -ESRCH);
+
+ dma_fence_get(&s_fence->finished);
+ if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb))
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+
+ prev = &s_fence->finished;
+ }
+ dma_fence_put(prev);
+}
+
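
Illustrative sketch, not part of the patch: both the flush and fini paths now funnel into drm_sched_entity_kill(), so a driver tearing down a context keeps using the existing entry point; struct my_ctx and my_ctx_fini below are hypothetical names.

#include <drm/gpu_scheduler.h>

struct my_ctx {
	struct drm_sched_entity entity;
};

/*
 * Hypothetical driver teardown: drm_sched_entity_destroy() first flushes
 * the entity and then calls drm_sched_entity_fini(); with this patch both
 * steps end up in drm_sched_entity_kill(), which stops the entity and
 * completes any leftover jobs with -ESRCH.
 */
static void my_ctx_fini(struct my_ctx *ctx)
{
	drm_sched_entity_destroy(&ctx->entity);
}
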
/**
* drm_sched_entity_flush - Flush a context entity
*
@@ -179,87 +247,13 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
/* For killed process disable any more IBs enqueue right now */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
if ((!last_user || last_user == current->group_leader) &&
- (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
- spin_lock(&entity->rq_lock);
- entity->stopped = true;
- drm_sched_rq_remove_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
- }
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_entity_kill(entity);
return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
-{
- struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
-
- drm_sched_fence_finished(job->s_fence);
- WARN_ON(job->s_fence->parent);
- job->sched->ops->free_job(job);
-}
-
-
-/* Signal the scheduler finished fence when the entity in question is killed. */
-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
- finish_cb);
-
- INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
- schedule_work(&job->work);
-}
-
-static struct dma_fence *
-drm_sched_job_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *entity)
-{
- if (!xa_empty(&job->dependencies))
- return xa_erase(&job->dependencies, job->last_dependency++);
-
- if (job->sched->ops->dependency)
- return job->sched->ops->dependency(job, entity);
-
- return NULL;
-}
-
-static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
-{
- struct drm_sched_job *job;
- struct dma_fence *f;
- int r;
-
- while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
- struct drm_sched_fence *s_fence = job->s_fence;
-
- /* Wait for all dependencies to avoid data corruptions */
- while ((f = drm_sched_job_dependency(job, entity)))
- dma_fence_wait(f, false);
-
- drm_sched_fence_scheduled(s_fence);
- dma_fence_set_error(&s_fence->finished, -ESRCH);
-
- /*
- * When pipe is hanged by older entity, new entity might
- * not even have chance to submit it's first job to HW
- * and so entity->last_scheduled will remain NULL
- */
- if (!entity->last_scheduled) {
- drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- continue;
- }
-
- r = dma_fence_add_callback(entity->last_scheduled,
- &job->finish_cb,
- drm_sched_entity_kill_jobs_cb);
- if (r == -ENOENT)
- drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
- else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
- }
-}
-
/**
* drm_sched_entity_fini - Destroy a context entity
*
@@ -273,33 +267,17 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = NULL;
-
- if (entity->rq) {
- sched = entity->rq->sched;
- drm_sched_rq_remove_entity(entity->rq, entity);
- }
-
- /* Consumption of existing IBs wasn't completed. Forcefully
- * remove them here.
+ /*
+ * If consumption of existing IBs wasn't completed, forcefully remove
+ * them here. Also makes sure that the scheduler won't touch this entity
+ * any more.
*/
- if (spsc_queue_count(&entity->job_queue)) {
- if (sched) {
- /*
- * Wait for thread to idle to make sure it isn't processing
- * this entity.
- */
- wait_for_completion(&entity->entity_idle);
+ drm_sched_entity_kill(entity);
- }
- if (entity->dependency) {
- dma_fence_remove_callback(entity->dependency,
- &entity->cb);
- dma_fence_put(entity->dependency);
- entity->dependency = NULL;
- }
-
- drm_sched_entity_kill_jobs(entity);
+ if (entity->dependency) {
+ dma_fence_remove_callback(entity->dependency, &entity->cb);
+ dma_fence_put(entity->dependency);
+ entity->dependency = NULL;
}
dma_fence_put(entity->last_scheduled);
@@ -385,7 +363,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
}
s_fence = to_drm_sched_fence(fence);
- if (s_fence && s_fence->sched == sched) {
+ if (s_fence && s_fence->sched == sched &&
+ !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
/*
* Fence is from the same scheduler, only need to wait for
@@ -411,6 +390,19 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
return false;
}
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ if (!xa_empty(&job->dependencies))
+ return xa_erase(&job->dependencies, job->last_dependency++);
+
+ if (job->sched->ops->prepare_job)
+ return job->sched->ops->prepare_job(job, entity);
+
+ return NULL;
+}
+
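
Illustrative sketch, not part of the patch: a driver that implemented the optional dependency callback keeps the same logic but registers it under the new prepare_job name; my_prepare_job and my_sched_ops are hypothetical.

#include <drm/gpu_scheduler.h>

/*
 * Hypothetical driver hook: return the next fence the job still has to
 * wait for, or NULL once the job can be handed to run_job(). It is
 * called through job->sched->ops->prepare_job() in
 * drm_sched_job_dependency() above.
 */
static struct dma_fence *my_prepare_job(struct drm_sched_job *sched_job,
					struct drm_sched_entity *entity)
{
	return NULL; /* no extra dependencies in this sketch */
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.prepare_job = my_prepare_job,
	/* .run_job, .timedout_job and .free_job omitted for brevity */
};
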
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_sched_job *sched_job;
@@ -443,6 +435,19 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
smp_wmb();
spsc_queue_pop(&entity->job_queue);
+
+ /*
+ * Update the entity's location in the min heap according to
+ * the timestamp of the next job, if any.
+ */
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
+ struct drm_sched_job *next;
+
+ next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ if (next)
+ drm_sched_rq_update_fifo(entity, next->submit_ts);
+ }
+
return sched_job;
}
@@ -507,6 +512,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+ sched_job->submit_ts = ktime_get();
/* first job wakes up scheduler */
if (first) {
@@ -518,8 +524,13 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
DRM_ERROR("Trying to push to a killed entity\n");
return;
}
+
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+
drm_sched_wakeup(entity->rq->sched);
}
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e5a4ecde0063..31f3a1267be4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -62,6 +62,55 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
+int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
+
+/**
+ * DOC: sched_policy (int)
+ * Used to override the default scheduling policy for entities in a run queue.
+ */
+MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
+module_param_named(sched_policy, drm_sched_policy, int, 0444);
+
+static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
+ const struct rb_node *b)
+{
+ struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
+ struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
+
+ return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
+}
+
+static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+{
+ struct drm_sched_rq *rq = entity->rq;
+
+ if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
+ rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
+ RB_CLEAR_NODE(&entity->rb_tree_node);
+ }
+}
+
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+{
+ /*
+ * Both locks need to be grabbed, one to protect against a concurrent
+ * entity->rq change from drm_sched_entity_select_rq() and the other to
+ * protect the rb tree while it is updated.
+ */
+ spin_lock(&entity->rq_lock);
+ spin_lock(&entity->rq->lock);
+
+ drm_sched_rq_remove_fifo_locked(entity);
+
+ entity->oldest_job_waiting = ts;
+
+ rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+ drm_sched_entity_compare_before);
+
+ spin_unlock(&entity->rq->lock);
+ spin_unlock(&entity->rq_lock);
+}
+
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -75,6 +124,7 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
+ rq->rb_tree_root = RB_ROOT_CACHED;
rq->current_entity = NULL;
rq->sched = sched;
}
@@ -92,9 +142,12 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
{
if (!list_empty(&entity->list))
return;
+
spin_lock(&rq->lock);
+
atomic_inc(rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
+
spin_unlock(&rq->lock);
}
@@ -111,23 +164,30 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
{
if (list_empty(&entity->list))
return;
+
spin_lock(&rq->lock);
+
atomic_dec(rq->sched->score);
list_del_init(&entity->list);
+
if (rq->current_entity == entity)
rq->current_entity = NULL;
+
+ if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+ drm_sched_rq_remove_fifo_locked(entity);
+
spin_unlock(&rq->lock);
}
/**
- * drm_sched_rq_select_entity - Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
*
* @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
struct drm_sched_entity *entity;
@@ -164,6 +224,34 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Find oldest waiting ready entity, returns NULL if none found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+{
+ struct rb_node *rb;
+
+ spin_lock(&rq->lock);
+ for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
+ struct drm_sched_entity *entity;
+
+ entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
+ if (drm_sched_entity_is_ready(entity)) {
+ rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
+ break;
+ }
+ }
+ spin_unlock(&rq->lock);
+
+ return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
+}
+
+/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -198,32 +286,6 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_dependency_optimized - test if the dependency can be optimized
- *
- * @fence: the dependency fence
- * @entity: the entity which depends on the above fence
- *
- * Returns true if the dependency can be optimized and false otherwise
- */
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity)
-{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
- struct drm_sched_fence *s_fence;
-
- if (!fence || dma_fence_is_signaled(fence))
- return false;
- if (fence->context == entity->fence_context)
- return true;
- s_fence = to_drm_sched_fence(fence);
- if (s_fence && s_fence->sched == sched)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(drm_sched_dependency_optimized);
-
-/**
* drm_sched_start_timeout - start timeout for reset worker
*
* @sched: scheduler instance to start the worker for
@@ -355,27 +417,6 @@ static void drm_sched_job_timedout(struct work_struct *work)
}
}
- /**
- * drm_sched_increase_karma - Update sched_entity guilty flag
- *
- * @bad: The job guilty of time out
- *
- * Increment on every hang caused by the 'bad' job. If this exceeds the hang
- * limit of the scheduler then the respective sched entity is marked guilty and
- * jobs from it will not be scheduled further
- */
-void drm_sched_increase_karma(struct drm_sched_job *bad)
-{
- drm_sched_increase_karma_ext(bad, 1);
-}
-EXPORT_SYMBOL(drm_sched_increase_karma);
-
-void drm_sched_reset_karma(struct drm_sched_job *bad)
-{
- drm_sched_increase_karma_ext(bad, 0);
-}
-EXPORT_SYMBOL(drm_sched_reset_karma);
-
/**
* drm_sched_stop - stop the scheduler
*
@@ -517,31 +558,14 @@ EXPORT_SYMBOL(drm_sched_start);
*/
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
- drm_sched_resubmit_jobs_ext(sched, INT_MAX);
-}
-EXPORT_SYMBOL(drm_sched_resubmit_jobs);
-
-/**
- * drm_sched_resubmit_jobs_ext - helper to relunch certain number of jobs from mirror ring list
- *
- * @sched: scheduler instance
- * @max: job numbers to relaunch
- *
- */
-void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
-{
struct drm_sched_job *s_job, *tmp;
uint64_t guilty_context;
bool found_guilty = false;
struct dma_fence *fence;
- int i = 0;
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
- if (i >= max)
- break;
-
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
found_guilty = true;
guilty_context = s_job->s_fence->scheduled.context;
@@ -551,7 +575,6 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
fence = sched->ops->run_job(s_job);
- i++;
if (IS_ERR_OR_NULL(fence)) {
if (IS_ERR(fence))
@@ -567,7 +590,7 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
}
}
}
-EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
* drm_sched_job_init - init a scheduler job
@@ -685,32 +708,28 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
- * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
- * dependencies
+ * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
* @job: scheduler job to add the dependencies to
- * @obj: the gem object to add new dependencies from.
- * @write: whether the job might write the object (so we need to depend on
- * shared fences in the reservation object).
+ * @resv: the dma_resv object to get the fences from
+ * @usage: the dma_resv_usage to use to filter the fences
*
- * This should be called after drm_gem_lock_reservations() on your array of
- * GEM objects used in the job but before updating the reservations with your
- * own fences.
+ * This adds all fences matching the given usage from @resv to @job.
+ * Must be called with the @resv lock held.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
*/
-int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
- struct drm_gem_object *obj,
- bool write)
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
int ret;
- dma_resv_assert_held(obj->resv);
+ dma_resv_assert_held(resv);
- dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
- fence) {
+ dma_resv_for_each_fence(&cursor, resv, usage, fence) {
/* Make sure to grab an additional ref on the added fence */
dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
@@ -721,8 +740,31 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
}
return 0;
}
-EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
+EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ * dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ return drm_sched_job_add_resv_dependencies(job, obj->resv,
+ dma_resv_usage_rw(write));
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
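
Illustrative sketch, not part of the patch: the new helper lets a driver pull fences from an arbitrary reservation object and usage class instead of only the implicit read/write fences of a GEM object; my_add_vm_deps and the vm_resv parameter are hypothetical.

#include <drm/gpu_scheduler.h>
#include <linux/dma-resv.h>

/*
 * Hypothetical example: make a job wait for everything tracked in a VM's
 * reservation object, including DMA_RESV_USAGE_BOOKKEEP fences that
 * implicit sync would skip. The caller must already hold the resv lock.
 */
static int my_add_vm_deps(struct drm_sched_job *job, struct dma_resv *vm_resv)
{
	dma_resv_assert_held(vm_resv);

	return drm_sched_job_add_resv_dependencies(job, vm_resv,
						   DMA_RESV_USAGE_BOOKKEEP);
}
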
/**
* drm_sched_job_cleanup - clean up scheduler job resources
@@ -803,7 +845,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
/* Kernel run queue has higher priority than normal run queue*/
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
- entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+ entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+ drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
+ drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
if (entity)
break;
}
@@ -1082,13 +1126,15 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
EXPORT_SYMBOL(drm_sched_fini);
/**
- * drm_sched_increase_karma_ext - Update sched_entity guilty flag
+ * drm_sched_increase_karma - Update sched_entity guilty flag
*
* @bad: The job guilty of time out
- * @type: type for increase/reset karma
*
+ * Increment on every hang caused by the 'bad' job. If this exceeds the hang
+ * limit of the scheduler then the respective sched entity is marked guilty and
+ * jobs from it will not be scheduled further
*/
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
+void drm_sched_increase_karma(struct drm_sched_job *bad)
{
int i;
struct drm_sched_entity *tmp;
@@ -1100,10 +1146,7 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
* corrupt but keep in mind that kernel jobs always considered good.
*/
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
- if (type == 0)
- atomic_set(&bad->karma, 0);
- else if (type == 1)
- atomic_inc(&bad->karma);
+ atomic_inc(&bad->karma);
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
i++) {
@@ -1114,7 +1157,7 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
if (bad->s_fence->scheduled.context ==
entity->fence_context) {
if (entity->guilty)
- atomic_set(entity->guilty, type);
+ atomic_set(entity->guilty, 1);
break;
}
}
@@ -1124,4 +1167,4 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
}
}
}
-EXPORT_SYMBOL(drm_sched_increase_karma_ext);
+EXPORT_SYMBOL(drm_sched_increase_karma);
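
Illustrative sketch, not part of the patch: drm_sched_increase_karma() is typically called from a driver's timedout_job handler as part of the stop/resubmit/start recovery sequence; my_timedout_job is a hypothetical handler and the hardware reset itself is elided.

#include <drm/gpu_scheduler.h>

/*
 * Hypothetical timeout handler: stop the scheduler, mark the offending
 * job, reset the hardware (elided), then replay the pending jobs and
 * restart the scheduler.
 */
static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	drm_sched_stop(sched, bad);
	drm_sched_increase_karma(bad);

	/* driver specific hardware reset would go here */

	drm_sched_resubmit_jobs(sched);
	drm_sched_start(sched, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}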