author	Matthew Brost <matthew.brost@intel.com>	2024-10-02 06:16:39 -0700
committer	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>	2024-10-02 17:54:05 +0200
commit	9286a191abe2ea01b34be577e8a09a412dcbb644 (patch)
tree	fa2d6fb99dc853c6821811370c69be474eb3d5e4 /drivers/gpu/drm/xe/xe_guc_submit.c
parent	34f50cc6441b7fee4a86495d5ef43da5d254bad9 (diff)
drm/xe: Drop GuC submit_wq pool
Now that drm sched uses a single lockdep map for all submit_wq, drop the
GuC submit_wq pool hack.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241002131639.3425022-3-matthew.brost@intel.com
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
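For context, the parent commit (34f50cc6441b) is what makes this safe to drop: when a driver passes submit_wq == NULL, drm_sched_init() allocates an ordered workqueue per scheduler, and under CONFIG_LOCKDEP all of those internally allocated queues share one static lockdep map, so lockdep sees a single class rather than one class per queue. A simplified sketch of that scheduler-side path (symbol and field names are approximate and shown only for illustration, not taken from this patch):

#ifdef CONFIG_LOCKDEP
static struct lockdep_map drm_sched_lockdep_map = {
	.name = "drm_sched_lockdep_map",
};
#endif

	/* Inside drm_sched_init(), when the caller passes submit_wq == NULL: */
	if (submit_wq) {
		sched->submit_wq = submit_wq;
		sched->own_submit_wq = false;
	} else {
#ifdef CONFIG_LOCKDEP
		/* Every internally allocated submit queue shares one lockdep map. */
		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
								       WQ_MEM_RECLAIM,
								       &drm_sched_lockdep_map);
#else
		sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
#endif
		if (!sched->submit_wq)
			return -ENOMEM;
		sched->own_submit_wq = true;
	}

With that in place, each xe exec queue can simply pass NULL for submit_wq in the diff below and let the scheduler own its queue, which is why the CONFIG_PROVE_LOCKING workqueue pool in xe_guc_submit.c is no longer needed.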
Diffstat (limited to 'drivers/gpu/drm/xe/xe_guc_submit.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_guc_submit.c	60
1 file changed, 1 insertion(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index fbbe6a487bbb..17c25f18e286 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -224,64 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
EXEC_QUEUE_STATE_BANNED));
}
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
- int i;
-
- for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
- guc->submission_state.submit_wq_pool[i] =
- alloc_ordered_workqueue("submit_wq", 0);
- if (!guc->submission_state.submit_wq_pool[i])
- goto err_free;
- }
-
- return 0;
-
-err_free:
- while (i)
- destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
- return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
- int i;
-
- for (i = 0; i < NUM_SUBMIT_WQ; ++i)
- destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
- int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
- return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
- return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
- return NULL;
-}
-#endif
-
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
xa_destroy(&guc->submission_state.exec_queue_lookup);
- free_submit_wq(guc);
}
static void guc_submit_wedged_fini(void *arg)
@@ -337,10 +284,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
if (err)
return err;
- err = alloc_submit_wq(guc);
- if (err)
- return err;
-
gt->exec_queue_ops = &guc_exec_queue_ops;
xa_init(&guc->submission_state.exec_queue_lookup);
@@ -1452,8 +1395,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
- get_submit_wq(guc),
- q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+ NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err)