Diffstat (limited to 'drivers/gpu/drm/i915/gem')
 drivers/gpu/drm/i915/gem/i915_gem_context.c              | 22
 drivers/gpu/drm/i915/gem/i915_gem_create.c               |  7
 drivers/gpu/drm/i915/gem/i915_gem_domain.c               | 14
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c           | 10
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c                 |  3
 drivers/gpu/drm/i915/gem/i915_gem_object.c               |  2
 drivers/gpu/drm/i915/gem/i915_gem_object.h               |  2
 drivers/gpu/drm/i915/gem/i915_gem_object_types.h         |  3
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c               | 17
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c                  | 17
 drivers/gpu/drm/i915/gem/i915_gem_ttm.h                  |  2
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c             |  7
 drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c               | 12
 drivers/gpu/drm/i915/gem/i915_gem_wait.c                 |  2
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c          |  2
 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 19
 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c    | 14
 17 files changed, 90 insertions(+), 65 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6d639ca24dfb..5402a7bbcb1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -364,7 +364,7 @@ static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
{
- struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_private *i915 = fpriv->i915;
struct i915_address_space *vm;
if (args->size)
@@ -733,7 +733,7 @@ static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
const struct drm_i915_gem_context_param *args)
{
- struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_private *i915 = fpriv->i915;
struct set_proto_ctx_engines set = { .i915 = i915 };
struct i915_context_param_engines __user *user =
u64_to_user_ptr(args->value);
@@ -813,7 +813,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
struct i915_gem_proto_context *pc,
struct drm_i915_gem_context_param *args)
{
- struct drm_i915_private *i915 = fpriv->dev_priv;
+ struct drm_i915_private *i915 = fpriv->i915;
struct drm_i915_gem_context_param_sseu user_sseu;
struct intel_sseu *sseu;
int ret;
@@ -913,7 +913,7 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
break;
case I915_CONTEXT_PARAM_PRIORITY:
- ret = validate_priority(fpriv->dev_priv, args);
+ ret = validate_priority(fpriv->i915, args);
if (!ret)
pc->sched.priority = args->value;
break;
@@ -934,12 +934,12 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
if (args->size)
ret = -EINVAL;
else
- ret = proto_context_set_persistence(fpriv->dev_priv, pc,
+ ret = proto_context_set_persistence(fpriv->i915, pc,
args->value);
break;
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
- ret = proto_context_set_protected(fpriv->dev_priv, pc,
+ ret = proto_context_set_protected(fpriv->i915, pc,
args->value);
break;
@@ -1770,7 +1770,7 @@ void i915_gem_context_close(struct drm_file *file)
unsigned long idx;
xa_for_each(&file_priv->proto_context_xa, idx, pc)
- proto_context_close(file_priv->dev_priv, pc);
+ proto_context_close(file_priv->i915, pc);
xa_destroy(&file_priv->proto_context_xa);
mutex_destroy(&file_priv->proto_context_lock);
@@ -2206,7 +2206,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
lockdep_assert_held(&file_priv->proto_context_lock);
- ctx = i915_gem_create_context(file_priv->dev_priv, pc);
+ ctx = i915_gem_create_context(file_priv->i915, pc);
if (IS_ERR(ctx))
return ctx;
@@ -2223,7 +2223,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
old = xa_erase(&file_priv->proto_context_xa, id);
GEM_BUG_ON(old != pc);
- proto_context_close(file_priv->dev_priv, pc);
+ proto_context_close(file_priv->i915, pc);
return ctx;
}
@@ -2352,7 +2352,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
GEM_WARN_ON(ctx && pc);
if (pc)
- proto_context_close(file_priv->dev_priv, pc);
+ proto_context_close(file_priv->i915, pc);
if (ctx)
context_close(ctx);
@@ -2505,7 +2505,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
* GEM_CONTEXT_CREATE starting with graphics
* version 13.
*/
- WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
+ WARN_ON(GRAPHICS_VER(file_priv->i915) > 12);
ret = set_proto_ctx_param(file_priv, pc, args);
} else {
ret = -ENOENT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index e76c9703680e..bfe1dbda4cb7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -144,7 +144,8 @@ object_free:
}
/**
- * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
+ * __i915_gem_object_create_user - Creates a new object using the same path as
+ * DRM_I915_GEM_CREATE_EXT
* @i915: i915 private
* @size: size of the buffer, in bytes
* @placements: possible placement regions, in priority order
@@ -215,7 +216,7 @@ i915_gem_dumb_create(struct drm_file *file,
}
/**
- * Creates a new mm object and returns a handle to it.
+ * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
@@ -399,7 +400,7 @@ static const i915_user_extension_fn create_extensions[] = {
};
/**
- * Creates a new mm object and returns a handle to it.
+ * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
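
Note: the create.c hunks (and the matching ones in i915_gem_domain.c and i915_gem_wait.c below) all fix the same kernel-doc warning: a comment opened with /** must begin with the name of the function it documents. A minimal sketch of the expected shape, using a hypothetical my_func for illustration:

    /**
     * my_func - one-line summary that starts with the function name
     * @arg: what the parameter means
     *
     * Optional longer description follows the blank line.
     */
    int my_func(int arg);
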
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 497de40b8e68..d2d5a24301b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -116,7 +116,8 @@ void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
}
/**
- * Moves a single object to the WC read, and possibly write domain.
+ * i915_gem_object_set_to_wc_domain - Moves a single object to the WC read, and
+ * possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
@@ -177,7 +178,8 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
}
/**
- * Moves a single object to the GTT read, and possibly write domain.
+ * i915_gem_object_set_to_gtt_domain - Moves a single object to the GTT read,
+ * and possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
@@ -246,7 +248,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
/**
- * Changes the cache-level of an object across all VMA.
+ * i915_gem_object_set_cache_level - Changes the cache-level of an object across all VMA.
* @obj: object to act on
* @cache_level: new cache level to set for the object
*
@@ -467,7 +469,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
}
/**
- * Moves a single object to the CPU read, and possibly write domain.
+ * i915_gem_object_set_to_cpu_domain - Moves a single object to the CPU read,
+ * and possibly write domain.
* @obj: object to act on
* @write: requesting write or read-only access
*
@@ -511,7 +514,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
/**
- * Called when user space prepares to use an object with the CPU, either
+ * i915_gem_set_domain_ioctl - Called when user space prepares to use an
+ * object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
* @dev: drm device
* @data: ioctl data blob
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 9dce2957b4e5..3aeede6aee4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2449,11 +2449,6 @@ static int eb_submit(struct i915_execbuffer *eb)
return err;
}
-static int num_vcs_engines(struct drm_i915_private *i915)
-{
- return hweight_long(VDBOX_MASK(to_gt(i915)));
-}
-
/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
@@ -2467,7 +2462,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_u32_below(num_vcs_engines(dev_priv));
+ get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]);
return file_priv->bsd_engine;
}
@@ -2655,7 +2650,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
return -1;
}
- if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
+ if (user_ring_id == I915_EXEC_BSD &&
+ i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO] > 1) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
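
Note on the num_vcs_engines() removal (my reading; the hunk itself carries no rationale): VDBOX_MASK(to_gt(i915)) inspects only the root GT, whereas engine_uabi_class_count[] is accumulated while registering engines from every GT, so the count stays correct on parts whose video engines live on a separate media GT. Side by side:

    /* Removed helper: counts VDBOX engines on the root GT only. */
    static int num_vcs_engines(struct drm_i915_private *i915)
    {
            return hweight_long(VDBOX_MASK(to_gt(i915)));
    }

    /* Replacement: the uabi class count spans all GTs. */
    unsigned int n_vcs =
            i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO];
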
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 8949fb0a944f..3198b64ad7db 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
memcpy(map, data, size);
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_flush_map(obj);
+ __i915_gem_object_release_map(obj);
return obj;
}
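
Note: the lmem.c change splits the old unpin into an explicit flush plus an eager release. A hedged sketch of the resulting write-then-hand-off pattern (the pin call is paraphrased from earlier in the function):

    map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
    memcpy(map, data, size);
    i915_gem_object_flush_map(obj);       /* flush WC writes before GPU use */
    __i915_gem_object_release_map(obj);   /* unpin and drop this one-shot map */
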
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index e6d4efde4fc5..4666bb82f312 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -875,7 +875,7 @@ int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
return ret < 0 ? ret : 0;
}
-/**
+/*
* i915_gem_object_has_unknown_state - Return true if the object backing pages are
* in an unknown_state. This means that userspace must NEVER be allowed to touch
* the pages, with either the GPU or CPU.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index f9a8acbba715..885ccde9dc3c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -303,7 +303,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
- return READ_ONCE(obj->frontbuffer);
+ return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}
static inline unsigned int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 19c9bdd8f905..5dcbbef31d44 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -491,6 +491,9 @@ struct drm_i915_gem_object {
*/
unsigned int cache_dirty:1;
+ /* @is_dpt: Object houses a display page table (DPT) */
+ unsigned int is_dpt:1;
+
/**
* @read_domains: Read memory domains.
*
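
Note: together with the object.h hunk above, the new bit makes display page-table (DPT) objects report as framebuffers. The assumed motivation: paths keyed on i915_gem_object_is_framebuffer() keep such objects resident while the display may still reference them. A hedged caller sketch:

    if (i915_gem_object_is_framebuffer(obj))
            return;   /* scanout fb or DPT: don't shrink/swap it out */
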
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 90a967374b1a..8ac376c24aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -890,8 +890,9 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
/*
* MTL dsm size is in GGC register.
- * Also MTL uses offset to DSMBASE in ptes, so i915
- * uses dsm_base = 0 to setup stolen region.
+ * Also MTL uses offset to GSMBASE in ptes, so i915
+ * uses dsm_base = 8MB to set up the stolen region, since
+ * DSMBASE = GSMBASE + 8MB.
*/
ret = mtl_get_gms_size(uncore);
if (ret < 0) {
@@ -899,27 +900,25 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
return ERR_PTR(ret);
}
- dsm_base = 0;
+ dsm_base = SZ_8M;
dsm_size = (resource_size_t)(ret * SZ_1M);
GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M);
- GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size);
+ GEM_BUG_ON((dsm_base + dsm_size) > lmem_size);
} else {
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK;
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
- dsm_size = lmem_size - dsm_base;
+ dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M);
}
- io_size = dsm_size;
- if (HAS_LMEMBAR_SMEM_STOLEN(i915)) {
- io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M;
- } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
+ if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
+ io_size = dsm_size;
}
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
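
A worked example of the MTL stolen layout this hunk encodes (the 64MB figure is assumed, purely for illustration): PTEs hold offsets from GSMBASE, and DSMBASE = GSMBASE + 8MB, so the stolen region starts at offset 8MB:

    dsm_base = SZ_8M;                       /* DSMBASE - GSMBASE */
    dsm_size = 64 * SZ_1M;                  /* from the GGC register (example) */
    io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
    io_size  = dsm_size;
    /* sanity: dsm_base + dsm_size = 72M must fit in lmem_size (256M BAR) */
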
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7420276827a5..9227f8146a58 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+ /*
* This gets called twice by ttm; as long as we have a ttm resource or
* ttm_tt it is still safe to call this. Due to pipeline-gutting, we
* may have a NULL bo->resource, but in that case we should always
* have a ttm alive (e.g. if the pages are swapped out).
+ */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -1067,11 +1073,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
.interruptible = true,
.no_wait_gpu = true, /* should be idle already */
};
+ int err;
GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
- ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
- if (ret) {
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS;
}
@@ -1267,7 +1274,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
}
}
-/**
+/*
* __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
* @mem: The initial memory region for the object.
* @obj: The gem object.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 2a94a99ef76b..f8f6bed1b297 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
/* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != I915_PL_SYSTEM;
+ return mem && mem->mem_type != I915_PL_SYSTEM;
}
bool i915_ttm_resource_mappable(struct ttm_resource *res);
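
Note: several hunks in this patch (i915_ttm_shrink, delete_mem_notify, and the NULL check here) defend against TTM's pipeline gutting, where bo->resource is freed while bo->ttm and its possibly swapped-out pages stay alive. With the helper now NULL-safe, callers may pass bo->resource directly; a hedged sketch (shrink_to_system() is hypothetical):

    /* After pipeline gutting bo->resource may be NULL; a NULL resource
     * behaves like system placement here. */
    if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
            return 0;                /* nothing to shrink via shmem */
    return shrink_to_system(bo);     /* hypothetical helper */
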
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 76dd9e5e1a8b..dd188dfcc423 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -253,6 +253,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
* @_src_iter: Storage space for the source kmap iterator.
* @dst_iter: Pointer to the destination kmap iterator.
* @src_iter: Pointer to the source kmap iterator.
+ * @num_pages: Number of pages
* @clear: Whether to clear instead of copy.
* @src_rsgt: Refcounted scatter-gather list of source memory.
* @dst_rsgt: Refcounted scatter-gather list of destination memory.
@@ -557,6 +558,8 @@ out:
* i915_ttm_move - The TTM move callback used by i915.
* @bo: The buffer object.
* @evict: Whether this is an eviction.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how any waits
+ * should be performed
* @dst_mem: The destination ttm resource.
* @hop: If we need multihop, what temporary memory type to move to.
*
@@ -711,6 +714,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(dst);
assert_object_held(src);
+
+ if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+ return -EINVAL;
+
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 7e67742bc65e..ad649523d5e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
unsigned int flags;
int err = 0;
- if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
return 0;
if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -144,8 +144,7 @@ void i915_ttm_recover_region(struct intel_memory_region *mr)
/**
* i915_ttm_backup_region - Back up all objects of a region to smem.
* @mr: The memory region
- * @allow_gpu: Whether to allow the gpu blitter for this backup.
- * @backup_pinned: Backup also pinned objects.
+ * @flags: TTM backup flags
*
* Loops over all objects of a region and either evicts them if they are
* evictable or backs them up using a backup object if they are pinned.
@@ -187,7 +186,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
return err;
/* Content may have been swapped. */
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!backup_bo->resource)
+ err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+ if (!err)
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
@@ -209,7 +211,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
/**
* i915_ttm_restore_region - Restore backed-up objects of a region from smem.
* @mr: The memory region
- * @allow_gpu: Whether to allow the gpu blitter to recover.
+ * @flags: TTM backup flags
*
* Loops over all objects of a region and if they are backed-up, restores
* them from smem.
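
Note: the restore path now copes with a backup object whose resource was dropped by pipeline gutting: first re-validate into system placement to attach a resource, then populate the ttm_tt to bring swapped pages back. Sketched with annotations (my reading of the ordering):

    if (!backup_bo->resource)        /* gutted: re-attach a system resource */
            err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
    if (!err)                        /* then swap the backing pages back in */
            err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
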
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index e6e01c2a74a6..4a33ad2d122b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -161,7 +161,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
}
/**
- * Waits for rendering to the object to be completed
+ * i915_gem_object_wait - Waits for rendering to the object to be completed
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index defece0bcb81..99f39a5feca1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
- GEM_BUG_ON(order >= MAX_ORDER);
+ GEM_BUG_ON(order > MAX_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;
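
Note: the assertion change follows the treewide redefinition of MAX_ORDER (to the best of my knowledge): it used to be one past the largest allocatable order and is now the largest valid order itself, so order == MAX_ORDER became legal:

    GEM_BUG_ON(order >= MAX_ORDER);   /* old: orders ran 0 .. MAX_ORDER - 1 */
    GEM_BUG_ON(order > MAX_ORDER);    /* new: orders run 0 .. MAX_ORDER */
    page = alloc_pages(GFP | __GFP_ZERO, order);   /* order == MAX_ORDER ok */
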
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3bb1f7f0110e..ff81af4c8202 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -108,31 +108,30 @@ struct tiled_blits {
u32 height;
};
-static bool supports_x_tiling(const struct drm_i915_private *i915)
+static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
int gen = GRAPHICS_VER(i915);
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ drm_WARN_ON(&i915->drm, gen < 9);
+
if (gen < 12)
return true;
- if (!HAS_LMEM(i915) || IS_DG1(i915))
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
return false;
- return true;
+ return HAS_DISPLAY(i915);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
{
- int gen = GRAPHICS_VER(buf->vma->vm->i915);
-
- if (gen < 9)
+ /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
+ if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
return false;
- if (gen < 12)
- return true;
-
/* filter out platforms without X-tile support in fastblit */
- if (buf->tiling == CLIENT_TILING_X && !supports_x_tiling(buf->vma->vm->i915))
+ if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
return false;
return true;
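
For reference, the fast-blit policy the reworked helpers encode, as I read the code (not a statement of hardware documentation):

    /*
     *  gen < 9               : no XY_FAST_COPY_BLT -> fast_blit_ok() false
     *  gen 9..11             : fast blit OK, X-tiling OK
     *  12.00 <= ver < 12.50  : fast blit OK, X-tiling unsupported
     *  ver >= 12.50          : X-tiling only on parts with display
     */
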
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index a81fa6a20f5a..7b516b1a4915 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg)
continue;
ce = intel_context_create(data[m].ce[0]->engine);
- if (IS_ERR(ce))
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
goto out;
+ }
err = intel_context_pin(ce);
if (err) {
@@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg)
worker = kthread_create_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
- if (IS_ERR(worker))
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
goto out;
+ }
data[n].worker = worker;
}
@@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg)
}
}
- if (igt_live_test_end(&t))
- err = -EIO;
+ if (igt_live_test_end(&t)) {
+ err = err ?: -EIO;
+ break;
+ }
}
out:
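
Note: the selftest fixes all apply the same error-propagation idiom: record PTR_ERR() before jumping to the unwind label, and never let a later failure clobber an earlier one. Sketched with hypothetical names:

    thing = create_thing();
    if (IS_ERR(thing)) {
            err = PTR_ERR(thing);   /* don't goto out with err still 0 */
            goto out;
    }
    ...
    err = err ?: -EIO;              /* preserve the first error seen */
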