about summary refs log tree commit diff
path: root/drivers/gpu/drm/xe/xe_guc_ct.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/xe/xe_guc_ct.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c  12
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 0b086d17c083..9fb5fd4391d2 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -994,15 +994,8 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
return 0;
switch (FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1])) {
- /*
- * FIXME: We really should process
- * XE_GUC_ACTION_TLB_INVALIDATION_DONE here in the fast-path as
- * these critical for page fault performance. We currently can't
- * due to TLB invalidation done algorithm expecting the seqno
- * returned in-order. With some small changes to the algorithm
- * and locking we should be able to support out-of-order seqno.
- */
case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
break; /* Process these in fast-path */
default:
return 0;
@@ -1056,8 +1049,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
struct xe_device *xe = ct_to_xe(ct);
int len;
- if (!xe_device_in_fault_mode(xe) ||
- !xe_device_mem_access_get_if_ongoing(xe))
+ if (!xe_device_mem_access_get_if_ongoing(xe))
return;
spin_lock(&ct->fast_lock);