author | Matthew Brost <matthew.brost@intel.com> | 2023-01-17 21:11:43 -0800
---|---|---
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-19 18:27:45 -0500
commit | fc108a8b759f52b879e9a39642ee7988d251e453 (patch) |
tree | 7d4238ba97b09c26176c888161b5d13e7d80bd49 /drivers/gpu/drm/xe |
parent | 62ad062150c2ab72b0881c2f24f710e4c0bc4cd7 (diff) |
drm/xe: Add TLB invalidation fence
Fence will be signaled upon TLB invalidation completion.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
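
For readers of the interface change, here is a minimal, hypothetical caller-side sketch (not part of this patch) of how the new fence argument could be used: embed the fence in a heap allocation, initialise its dma_fence base with dma_fence_init() against caller-provided ops and lock, hand it to xe_gt_tlb_invalidation(), and wait for the GuC done handler to signal it. The names inval_fence_ops, inval_fence_lock, and invalidate_and_wait() are illustrative assumptions, not code from this series.

```c
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "xe_gt_tlb_invalidation.h"

/* Minimal dma_fence_ops; only the two mandatory callbacks are provided. */
static const char *inval_fence_get_driver_name(struct dma_fence *dma)
{
	return "xe";
}

static const char *inval_fence_get_timeline_name(struct dma_fence *dma)
{
	return "gt_tlb_invalidation";
}

static const struct dma_fence_ops inval_fence_ops = {
	.get_driver_name = inval_fence_get_driver_name,
	.get_timeline_name = inval_fence_get_timeline_name,
};

static DEFINE_SPINLOCK(inval_fence_lock);	/* protects fence signaling */

/* Issue a TLB invalidation and block until the GuC signals the fence. */
static int invalidate_and_wait(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence;
	int ret;

	/*
	 * Heap-allocate so the default dma_fence release (kfree_rcu on the
	 * base, which is the struct's first member) frees the allocation.
	 */
	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	dma_fence_init(&fence->base, &inval_fence_ops, &inval_fence_lock,
		       dma_fence_context_alloc(1), 1);
	/*
	 * Hold a local reference across the wait; the done handler (or the
	 * GT reset path) drops the initial reference when it signals.
	 */
	dma_fence_get(&fence->base);

	ret = xe_gt_tlb_invalidation(gt, fence);
	if (ret >= 0)
		dma_fence_wait(&fence->base, false);
	/* Error unwinding is simplified for this sketch. */

	dma_fence_put(&fence->base);
	return ret < 0 ? ret : 0;
}
```

The patch's own FIXME applies to such a caller as well: there is no timeout handling yet, so the wait relies on the GuC completion (or the new GT-reset path) eventually signaling the fence.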
Diffstat (limited to 'drivers/gpu/drm/xe')
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt.c                        |  1
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_debugfs.c                |  2
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_pagefault.c              |  2
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c       | 43
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h       |  6
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h | 26
-rw-r--r-- | drivers/gpu/drm/xe/xe_gt_types.h                  |  5
-rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c                        |  2
8 files changed, 80 insertions, 7 deletions
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 96136f130eda..28bbb3159531 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -669,6 +669,7 @@ static int gt_reset(struct xe_gt *gt)
 
 	xe_uc_stop_prepare(&gt->uc);
 	xe_gt_pagefault_reset(gt);
+	xe_gt_tlb_invalidation_reset(gt);
 
 	err = xe_uc_stop(&gt->uc);
 	if (err)
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index ea308b123474..946398f08bb5 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -99,7 +99,7 @@ static int invalidate_tlb(struct seq_file *m, void *data)
 	int seqno;
 	int ret = 0;
 
-	seqno = xe_gt_tlb_invalidation(gt);
+	seqno = xe_gt_tlb_invalidation(gt, NULL);
 	XE_WARN_ON(seqno < 0);
 	if (seqno > 0)
 		ret = xe_gt_tlb_invalidation_wait(gt, seqno);
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 93a8efe5d0a0..705093cb63d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -245,7 +245,7 @@ unlock_vm:
 		 * defer TLB invalidate + fault response to a callback of fence
 		 * too
 		 */
-		ret = xe_gt_tlb_invalidation(gt);
+		ret = xe_gt_tlb_invalidation(gt, NULL);
 		if (ret >= 0)
 			ret = 0;
 	}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index a39a2fb163ae..0058a155eeb9 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -17,11 +17,27 @@ guc_to_gt(struct xe_guc *guc)
 int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
 {
 	gt->tlb_invalidation.seqno = 1;
+	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
 
 	return 0;
 }
 
-static int send_tlb_invalidation(struct xe_guc *guc)
+void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
+{
+	struct xe_gt_tlb_invalidation_fence *fence, *next;
+
+	mutex_lock(&gt->uc.guc.ct.lock);
+	list_for_each_entry_safe(fence, next,
+				 &gt->tlb_invalidation.pending_fences, link) {
+		list_del(&fence->link);
+		dma_fence_signal(&fence->base);
+		dma_fence_put(&fence->base);
+	}
+	mutex_unlock(&gt->uc.guc.ct.lock);
+}
+
+static int send_tlb_invalidation(struct xe_guc *guc,
+				 struct xe_gt_tlb_invalidation_fence *fence)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
 	u32 action[] = {
@@ -41,6 +57,15 @@ static int send_tlb_invalidation(struct xe_guc *guc)
 	 */
 	mutex_lock(&guc->ct.lock);
 	seqno = gt->tlb_invalidation.seqno;
+	if (fence) {
+		/*
+		 * FIXME: How to deal TLB invalidation timeout, right now we
+		 * just have an endless fence which isn't ideal.
+		 */
+		fence->seqno = seqno;
+		list_add_tail(&fence->link,
+			      &gt->tlb_invalidation.pending_fences);
+	}
 	action[1] = seqno;
 	gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
 		TLB_INVALIDATION_SEQNO_MAX;
@@ -55,9 +80,10 @@ static int send_tlb_invalidation(struct xe_guc *guc)
 	return ret;
 }
 
-int xe_gt_tlb_invalidation(struct xe_gt *gt)
+int xe_gt_tlb_invalidation(struct xe_gt *gt,
+			   struct xe_gt_tlb_invalidation_fence *fence)
 {
-	return send_tlb_invalidation(&gt->uc.guc);
+	return send_tlb_invalidation(&gt->uc.guc, fence);
 }
 
 static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
@@ -97,8 +123,11 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
+	struct xe_gt_tlb_invalidation_fence *fence;
 	int expected_seqno;
 
+	lockdep_assert_held(&guc->ct.lock);
+
 	if (unlikely(len != 1))
 		return -EPROTO;
 
@@ -111,5 +140,13 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
 	smp_wmb();
 	wake_up_all(&guc->ct.wq);
 
+	fence = list_first_entry_or_null(&gt->tlb_invalidation.pending_fences,
+					 typeof(*fence), link);
+	if (fence && tlb_invalidation_seqno_past(gt, fence->seqno)) {
+		list_del(&fence->link);
+		dma_fence_signal(&fence->base);
+		dma_fence_put(&fence->base);
+	}
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index f1c3b34b1993..7e6fbf46f0e3 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -8,11 +8,15 @@
 
 #include <linux/types.h>
 
+#include "xe_gt_tlb_invalidation_types.h"
+
 struct xe_gt;
 struct xe_guc;
 
 int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
-int xe_gt_tlb_invalidation(struct xe_gt *gt);
+void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
+int xe_gt_tlb_invalidation(struct xe_gt *gt,
+			   struct xe_gt_tlb_invalidation_fence *fence);
 int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
 int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
new file mode 100644
index 000000000000..ab57c14c6d14
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TLB_INVALIDATION_TYPES_H_
+#define _XE_GT_TLB_INVALIDATION_TYPES_H_
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
+ *
+ * Optionally passed to xe_gt_tlb_invalidation and will be signaled upon TLB
+ * invalidation completion.
+ */
+struct xe_gt_tlb_invalidation_fence {
+	/** @base: dma fence base */
+	struct dma_fence base;
+	/** @link: link into list of pending tlb fences */
+	struct list_head link;
+	/** @seqno: seqno of TLB invalidation to signal fence one */
+	int seqno;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 3bfce7abe857..a755e3a86552 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -169,6 +169,11 @@ struct xe_gt {
 		 * @seqno_recv: last received TLB invalidation seqno, protected by CT lock
 		 */
 		int seqno_recv;
+		/**
+		 * @pending_fences: list of pending fences waiting TLB
+		 * invaliations, protected by CT lock
+		 */
+		struct list_head pending_fences;
 	} tlb_invalidation;
 
 	/** @usm: unified shared memory state */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c548cd04f9cf..aae9acc7759a 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3345,7 +3345,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 		if (xe_pt_zap_ptes(gt, vma)) {
 			gt_needs_invalidate |= BIT(id);
 			xe_device_wmb(xe);
-			seqno[id] = xe_gt_tlb_invalidation(gt);
+			seqno[id] = xe_gt_tlb_invalidation(gt, NULL);
 			if (seqno[id] < 0)
 				return seqno[id];
 		}