about summary refs log tree commit diff
path: root/drivers/gpu/drm/i915/selftests/intel_memory_region.c
diff options
context:
space:
mode:
authorChris Wilson <[email protected]>2020-12-02 17:34:44 +0000
committerChris Wilson <[email protected]>2020-12-02 20:59:29 +0000
commit7d1a31e128d3cb939cd70c95f898c13f85155571 (patch)
tree5cdffe81cc1fdb8d5a58437c9fa99061ef048fab /drivers/gpu/drm/i915/selftests/intel_memory_region.c
parenta2843b3bd17e5a1c6b270709dc5bb0091eba1074 (diff)
Revert "drm/i915/lmem: Limit block size to 4G"
Mixing I915_ALLOC_CONTIGUOUS and I915_ALLOC_MAX_SEGMENT_SIZE fared badly. The two directives conflict, with the contiguous request setting the min_order to the full size of the object, and the max-segment-size setting the max_order to the limit of the DMA mapper. This results in a situation where max_order < min_order, causing our sanity checks to fail. Instead of limiting the buddy block size, in the previous patch we split the oversized buddy into multiple scatterlist elements. Fixes: d2cf0125d4a1 ("drm/i915/lmem: Limit block size to 4G") Signed-off-by: Chris Wilson <[email protected]> Cc: Niranjana Vishwanathapura <[email protected]> Cc: Matthew Auld <[email protected]> Reviewed-by: Matthew Auld <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/intel_memory_region.c')
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c33
1 file changed, 22 insertions, 11 deletions
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 7c02a0c16fc1..a0b518c255de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -356,21 +356,21 @@ out_put:
static int igt_mock_max_segment(void *arg)
{
+ const unsigned int max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = arg;
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct i915_buddy_block *block;
+ struct scatterlist *sg;
LIST_HEAD(objects);
u64 size;
int err = 0;
/*
- * The size of block are only limited by the largest power-of-two that
- * will fit in the region size, but to construct an object we also
- * require feeding it into an sg list, where the upper limit of the sg
- * entry is at most UINT_MAX, therefore when allocating with
- * I915_ALLOC_MAX_SEGMENT_SIZE we shouldn't see blocks larger than
- * i915_sg_segment_size().
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+ * dma-mapping with scatterlist elements have an implicit limit of
+ * UINT_MAX on each element.
*/
size = SZ_8G;
@@ -384,12 +384,23 @@ static int igt_mock_max_segment(void *arg)
goto out_put;
}
+ err = -EINVAL;
list_for_each_entry(block, &obj->mm.blocks, link) {
- if (i915_buddy_block_size(&mem->mm, block) > i915_sg_segment_size()) {
- pr_err("%s found block size(%llu) larger than max sg_segment_size(%u)",
- __func__,
- i915_buddy_block_size(&mem->mm, block),
- i915_sg_segment_size());
+ if (i915_buddy_block_size(&mem->mm, block) > max_segment) {
+ err = 0;
+ break;
+ }
+ }
+ if (err) {
+ pr_err("%s: Failed to create a huge contiguous block\n",
+ __func__);
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
err = -EINVAL;
goto out_close;
}