-rw-r--r--	drivers/gpu/drm/xe/xe_bb.c               | 10 ----------
-rw-r--r--	drivers/gpu/drm/xe/xe_bb.h               |  2 --
-rw-r--r--	drivers/gpu/drm/xe/xe_exec_queue.c       |  4 ++--
-rw-r--r--	drivers/gpu/drm/xe/xe_exec_queue_types.h |  2 --
-rw-r--r--	drivers/gpu/drm/xe/xe_gt.c               | 23 +++++++----------------
-rw-r--r--	drivers/gpu/drm/xe/xe_ring_ops.c         |  2 +-
-rw-r--r--	drivers/gpu/drm/xe/xe_sched_job.c        |  6 ++----
7 files changed, 12 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 38f4ce83a207..1fbc2fcddc96 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -73,16 +73,6 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
 	return xe_sched_job_create(q, addr);
 }
 
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
-					 struct xe_bb *bb, u64 batch_base_ofs)
-{
-	u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
-
-	XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
-
-	return __xe_bb_create_job(q, bb, &addr);
-}
-
 struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
 						struct xe_bb *bb,
 						u64 batch_base_ofs,
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index c5ae0770bab5..fafacd73dcc3 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -20,8 +20,6 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
 struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
 						struct xe_bb *bb, u64 batch_ofs,
 						u32 second_idx);
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
-					 struct xe_bb *bb, u64 batch_ofs);
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 867465b0c57b..f28bceceb99a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -95,7 +95,7 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
 	 * can perform GuC CT actions when needed. Caller is expected to
 	 * have already grabbed the rpm ref outside any sensitive locks.
 	 */
-	if (q->flags & EXEC_QUEUE_FLAG_VM)
+	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
 		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
 
 	return q;
@@ -174,7 +174,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
 		xe_lrc_finish(q->lrc + i);
 	if (q->vm)
 		xe_vm_put(q->vm);
-	if (q->flags & EXEC_QUEUE_FLAG_VM)
+	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
 		xe_device_mem_access_put(gt_to_xe(q->gt));
 
 	kfree(q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 1f0051a91dae..4f4190971dcf 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -79,8 +79,6 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_VM			BIT(5)
 /* child of VM queue for multi-tile VM jobs */
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(6)
-/* queue used for WA setup */
-#define EXEC_QUEUE_FLAG_WA			BIT(7)
 
 	/**
 	 * @flags: flags for this exec queue, should statically setup aside from ban
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 13320af4ddd3..3d6a7c11bac1 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -87,15 +87,13 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
 	struct xe_sched_job *job;
 	struct xe_bb *bb;
 	struct dma_fence *fence;
-	u64 batch_ofs;
 	long timeout;
 
 	bb = xe_bb_new(gt, 4, false);
 	if (IS_ERR(bb))
 		return PTR_ERR(bb);
 
-	batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
-	job = xe_bb_create_wa_job(q, bb, batch_ofs);
+	job = xe_bb_create_job(q, bb);
 	if (IS_ERR(job)) {
 		xe_bb_free(bb, NULL);
 		return PTR_ERR(job);
@@ -124,7 +122,6 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
 	struct xe_sched_job *job;
 	struct xe_bb *bb;
 	struct dma_fence *fence;
-	u64 batch_ofs;
 	long timeout;
 	int count = 0;
 
@@ -143,8 +140,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
 		}
 	}
 
-	batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
-	job = xe_bb_create_wa_job(q, bb, batch_ofs);
+	job = xe_bb_create_job(q, bb);
 	if (IS_ERR(job)) {
 		xe_bb_free(bb, NULL);
 		return PTR_ERR(job);
@@ -168,14 +164,12 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
 int xe_gt_record_default_lrcs(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
-	struct xe_tile *tile = gt_to_tile(gt);
 	struct xe_hw_engine *hwe;
 	enum xe_hw_engine_id id;
 	int err = 0;
 
 	for_each_hw_engine(hwe, gt, id) {
 		struct xe_exec_queue *q, *nop_q;
-		struct xe_vm *vm;
 		void *default_lrc;
 
 		if (gt->default_lrc[hwe->class])
@@ -192,14 +186,13 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
 		if (!default_lrc)
 			return -ENOMEM;
 
-		vm = xe_migrate_get_vm(tile->migrate);
-		q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
-					 hwe, EXEC_QUEUE_FLAG_WA);
+		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
+					 hwe, EXEC_QUEUE_FLAG_KERNEL);
 		if (IS_ERR(q)) {
 			err = PTR_ERR(q);
 			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
 				  hwe->name, q);
-			goto put_vm;
+			return err;
 		}
 
 		/* Prime golden LRC with known good state */
@@ -210,8 +203,8 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
 			goto put_exec_queue;
 		}
 
-		nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
-					     1, hwe, EXEC_QUEUE_FLAG_WA);
+		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
+					     1, hwe, EXEC_QUEUE_FLAG_KERNEL);
 		if (IS_ERR(nop_q)) {
 			err = PTR_ERR(nop_q);
 			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
@@ -245,8 +238,6 @@ put_nop_q:
 		xe_exec_queue_put(nop_q);
 put_exec_queue:
 		xe_exec_queue_put(q);
-put_vm:
-		xe_vm_put(vm);
 		if (err)
 			break;
 	}
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 2b4127ea1eab..2238a40b7e8e 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -202,7 +202,7 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
 
 static u32 get_ppgtt_flag(struct xe_sched_job *job)
 {
-	return !(job->q->flags & EXEC_QUEUE_FLAG_WA) ? BIT(8) : 0;
+	return job->q->vm ? BIT(8) : 0;
 }
 
 /* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 0479d059dc77..b02183147e8e 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -59,8 +59,7 @@ static struct xe_sched_job *job_alloc(bool parallel)
 
 bool xe_sched_job_is_migration(struct xe_exec_queue *q)
 {
-	return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
-		!(q->flags & EXEC_QUEUE_FLAG_WA);
+	return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
 }
 
 static void job_free(struct xe_sched_job *job)
@@ -91,8 +90,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
 	XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
 
 	/* Migration and kernel engines have their own locking */
-	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
-			  EXEC_QUEUE_FLAG_WA))) {
+	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
 		lockdep_assert_held(&q->vm->lock);
 		if (!xe_vm_no_dma_fences(q->vm))
 			xe_vm_assert_held(q->vm);
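The core of the change is easiest to see in get_ppgtt_flag(): with EXEC_QUEUE_FLAG_WA gone, whether a job runs with PPGTT addressing is decided purely by whether its exec queue has a VM, which is what allows the default-LRC setup queues above to be created with a NULL vm. Below is a minimal userspace sketch of that decision logic; the struct layouts and the flag bit position are simplified stand-ins for illustration, not the real driver definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver structs -- not the real xe types. */
struct fake_vm { uint64_t flags; };
struct fake_exec_queue {
	struct fake_vm *vm;	/* NULL for the VM-less kernel setup queues */
	uint64_t flags;
};

#define EXEC_QUEUE_FLAG_KERNEL	(1ULL << 1)	/* illustrative bit position */

/* After this patch: the PPGTT enable bit depends only on having a VM. */
static uint32_t get_ppgtt_flag(const struct fake_exec_queue *q)
{
	return q->vm ? (1u << 8) : 0;
}

int main(void)
{
	struct fake_vm vm = { 0 };
	struct fake_exec_queue user_q  = { .vm = &vm, .flags = 0 };
	struct fake_exec_queue setup_q = { .vm = NULL,
					   .flags = EXEC_QUEUE_FLAG_KERNEL };

	/* A user queue addresses memory through its PPGTT; the VM-less
	 * kernel queue used for default-LRC/WA setup falls back to GGTT. */
	printf("user queue:  0x%x\n", (unsigned)get_ppgtt_flag(&user_q));  /* 0x100 */
	printf("setup queue: 0x%x\n", (unsigned)get_ppgtt_flag(&setup_q)); /* 0x0 */
	return 0;
}

This is also why xe_bb_create_wa_job() and its GGTT batch-offset argument could be deleted: once the setup queues carry no VM, the plain xe_bb_create_job() path emits a correctly addressed batch for them.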