| author | Chris Wilson <[email protected]> | 2022-12-01 00:58:02 +0100 |
|---|---|---|
| committer | Andi Shyti <[email protected]> | 2022-12-06 10:52:42 +0100 |
| commit | 8e4ee5e87ce60be439eca8d3a65bd870f6821902 (patch) | |
| tree | b61449f19f414017c88b9f840b74659c7b8c7895 /drivers/gpu/drm/i915/selftests/i915_request.c | |
| parent | 09f9b4418e417b6452d1bcd7a9544a68fc1e59d5 (diff) | |
drm/i915: Wrap all access to i915_vma.node.start|size
We already wrap i915_vma.node.start for use with the GGTT, as there we
can perform additional sanity checks that the node belongs to the GGTT
and fits within the 32b registers. In the next couple of patches, we
will introduce guard pages around the objects _inside_ the drm_mm_node
allocation. That is, we will offset the vma->pages so that the first page
is at drm_mm_node.start + vma->guard (not 0, as is currently the case).
All users must then not use i915_vma.node.start directly, but account
for the guard offset; thus all users are converted to use an
i915_vma_offset() wrapper.
The notable exceptions are the selftests that are testing exact
behaviour of i915_vma_pin/i915_vma_insert.
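To make the conversion in the diff below easier to follow, here is a minimal sketch of the shape such wrappers could take, assuming the later patches in the series add a vma->guard field to struct i915_vma. This is illustrative only, not the exact upstream helpers, which live in i915_vma.h and carry additional GEM_BUG_ON() sanity checks and GGTT-specific variants.

```c
/*
 * Illustrative sketch only: with guard pages placed inside the
 * drm_mm_node allocation, the first page of vma->pages sits at
 * node.start + guard, and the space usable by the object shrinks by a
 * guard on each side. Assumes a 'guard' field added to struct i915_vma.
 */
static inline u64 i915_vma_offset(const struct i915_vma *vma)
{
	/* Start of the object's pages, past the leading guard pages. */
	return vma->node.start + vma->guard;
}

static inline u64 i915_vma_size(const struct i915_vma *vma)
{
	/* Size available to the object, excluding guard pages at both ends. */
	return vma->node.size - 2 * vma->guard;
}
```

As a worked example under those assumptions: with node.start = 0x100000, node.size = 0x6000 and a single 4KiB guard page on each side (guard = 0x1000), i915_vma_offset() would report 0x101000 and i915_vma_size() 0x4000, which is what callers such as emit_bb_start() in the selftests below now consume instead of the raw node fields.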
Signed-off-by: Chris Wilson <[email protected]>
Signed-off-by: Tejas Upadhyay <[email protected]>
Co-developed-by: Thomas Hellström <[email protected]>
Signed-off-by: Thomas Hellström <[email protected]>
Signed-off-by: Tvrtko Ursulin <[email protected]>
Signed-off-by: Andi Shyti <[email protected]>
Reviewed-by: Tvrtko Ursulin <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_request.c')
| -rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_request.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 0daa8669181d..6fe22b096bdd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1017,8 +1017,8 @@ empty_request(struct intel_engine_cs *engine,
                 return request;
 
         err = engine->emit_bb_start(request,
-                                    batch->node.start,
-                                    batch->node.size,
+                                    i915_vma_offset(batch),
+                                    i915_vma_size(batch),
                                     I915_DISPATCH_SECURE);
         if (err)
                 goto out_request;
@@ -1138,14 +1138,14 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 
         if (ver >= 8) {
                 *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-                *cmd++ = lower_32_bits(vma->node.start);
-                *cmd++ = upper_32_bits(vma->node.start);
+                *cmd++ = lower_32_bits(i915_vma_offset(vma));
+                *cmd++ = upper_32_bits(i915_vma_offset(vma));
         } else if (ver >= 6) {
                 *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
-                *cmd++ = lower_32_bits(vma->node.start);
+                *cmd++ = lower_32_bits(i915_vma_offset(vma));
         } else {
                 *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-                *cmd++ = lower_32_bits(vma->node.start);
+                *cmd++ = lower_32_bits(i915_vma_offset(vma));
         }
         *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
 
@@ -1227,8 +1227,8 @@ static int live_all_engines(void *arg)
                 GEM_BUG_ON(err);
 
                 err = engine->emit_bb_start(request[idx],
-                                            batch->node.start,
-                                            batch->node.size,
+                                            i915_vma_offset(batch),
+                                            i915_vma_size(batch),
                                             0);
                 GEM_BUG_ON(err);
                 request[idx]->batch = batch;
@@ -1354,8 +1354,8 @@ static int live_sequential_engines(void *arg)
                 GEM_BUG_ON(err);
 
                 err = engine->emit_bb_start(request[idx],
-                                            batch->node.start,
-                                            batch->node.size,
+                                            i915_vma_offset(batch),
+                                            i915_vma_size(batch),
                                             0);
                 GEM_BUG_ON(err);
                 request[idx]->batch = batch;