author		Lucas De Marchi <lucas.demarchi@intel.com>	2024-02-22 06:41:24 -0800
committer	Lucas De Marchi <lucas.demarchi@intel.com>	2024-02-22 14:22:39 -0800
commit		7a975748d4dc0a524c99a390c6f74b7097ef8cf7 (patch)
tree		14ff645403638697c745cdb0f73f572c046792f7 /drivers/gpu/drm
parent		a24d9099777d9c314c984b94653407710c2358b4 (diff)
drm/xe: Use pointers in trace events
Commit a0df2cc858c3 ("drm/xe/xe_bo_move: Enhance xe_bo_move trace") inadvertently reverted commit 8d038f49c1f3 ("drm/xe: Fix cast on trace variable"), breaking the build on 32-bit.

As noted by Ville, there's no point in converting the pointers to u64 and adding casts everywhere. In fact, it's better to just use %p and let the address be hashed. Convert all the cases in xe_trace.h to use pointers.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Cc: Priyanka Dandamudi <priyanka.dandamudi@intel.com>
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240222144125.2862546-1-lucas.demarchi@intel.com
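For reference, the pattern the patch applies looks roughly like the sketch below (the event, struct, and field names here are illustrative, not taken from xe_trace.h): the trace entry stores the pointer as a pointer-typed __field and prints it with %p, so vsprintf hashes the address in the trace output and no (u64)/(unsigned long) casts are needed on 32-bit builds.

	/* Illustrative sketch only -- names are made up, not from xe_trace.h. */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM example

	#if !defined(_EXAMPLE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
	#define _EXAMPLE_TRACE_H_

	#include <linux/tracepoint.h>

	struct example_object {
		u32 seqno;
	};

	TRACE_EVENT(example_object_event,
		    TP_PROTO(struct example_object *obj),
		    TP_ARGS(obj),

		    TP_STRUCT__entry(
			     /* store the pointer directly; no cast to u64 */
			     __field(struct example_object *, obj)
			     __field(u32, seqno)
			     ),

		    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->seqno = obj->seqno;
			   ),

		    /* %p hashes the address when the event is printed */
		    TP_printk("obj=%p, seqno=%u", __entry->obj, __entry->seqno)
	);

	#endif

	/* This part must be outside the header guard. */
	#include <trace/define_trace.h>

The diff below makes exactly this kind of change across xe_trace.h: u64 fields holding pointer values become pointer-typed fields, the casts in TP_fast_assign go away, and the 0x%016llx format specifiers become %p.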
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/xe/xe_trace.h	30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 0cce98a6b14b..3b97633d81d8 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -27,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(u64, fence)
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
- __entry->fence = (u64)fence;
+ __entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=0x%016llx, seqno=%d",
+ TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -83,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
- __entry->vm = (unsigned long)bo->vm;
+ __entry->vm = bo->vm;
),
- TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -346,16 +346,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
- __field(u64, fence)
+ __field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
- __entry->fence = (unsigned long)fence;
+ __entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -384,7 +384,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
- __field(u64, vma)
+ __field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -392,14 +392,14 @@ DECLARE_EVENT_CLASS(xe_vma,
),
TP_fast_assign(
- __entry->vma = (unsigned long)vma;
+ __entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -484,16 +484,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
- __entry->vm = (unsigned long)vm;
+ __entry->vm = vm;
__entry->asid = vm->usm.asid;
),
- TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);