Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 54 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2bdddb61ebd7..35950fa91406 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -229,8 +229,9 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pread *args)
 {
 	unsigned int needs_clflush;
-	unsigned int idx, offset;
 	char __user *user_data;
+	unsigned long offset;
+	pgoff_t idx;
 	u64 remain;
 	int ret;
 
@@ -383,13 +384,17 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+	unsigned long remain, offset;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
 	void __user *user_data;
 	struct i915_vma *vma;
-	u64 remain, offset;
 	int ret = 0;
 
+	if (overflows_type(args->size, remain) ||
+	    overflows_type(args->offset, offset))
+		return -EINVAL;
+
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
 	vma = i915_gem_gtt_prepare(obj, &node, false);
@@ -540,13 +545,17 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
+	unsigned long remain, offset;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
 	struct i915_vma *vma;
-	u64 remain, offset;
 	void __user *user_data;
 	int ret = 0;
 
+	if (overflows_type(args->size, remain) ||
+	    overflows_type(args->offset, offset))
+		return -EINVAL;
+
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
 		 * Avoid waking the device up if we can fallback, as
@@ -654,8 +663,9 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 {
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
-	unsigned int offset, idx;
 	void __user *user_data;
+	unsigned long offset;
+	pgoff_t idx;
 	u64 remain;
 	int ret;
 
@@ -843,7 +853,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 		__i915_gem_object_release_mmap_gtt(obj);
 
 	list_for_each_entry_safe(obj, on,
-				 &to_gt(i915)->lmem_userfault_list, userfault_link)
+				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
 		i915_gem_object_runtime_pm_release_mmap_offset(obj);
 
 	/*
@@ -1099,7 +1109,7 @@ void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
 	while (atomic_read(&i915->mm.free_count)) {
 		flush_work(&i915->mm.free_work);
-		flush_delayed_work(&i915->bdev.wq);
+		drain_workqueue(i915->bdev.wq);
 		rcu_barrier();
 	}
 }
@@ -1128,6 +1138,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915)
 
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
+	struct intel_gt *gt;
+	unsigned int i;
 	int ret;
 
 	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
@@ -1138,8 +1150,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret)
 		return ret;
 
-	intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
-	intel_wopcm_init(&dev_priv->wopcm);
+	for_each_gt(gt, dev_priv, i) {
+		intel_uc_fetch_firmwares(&gt->uc);
+		intel_wopcm_init(&gt->wopcm);
+		if (GRAPHICS_VER(dev_priv) >= 8)
+			setup_private_pat(gt);
+	}
 
 	ret = i915_init_ggtt(dev_priv);
 	if (ret) {
@@ -1158,9 +1174,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 */
 	intel_init_clock_gating(dev_priv);
 
-	ret = intel_gt_init(to_gt(dev_priv));
-	if (ret)
-		goto err_unlock;
+	for_each_gt(gt, dev_priv, i) {
+		ret = intel_gt_init(gt);
+		if (ret)
+			goto err_unlock;
+	}
 
 	return 0;
 
@@ -1173,8 +1191,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_unlock:
 	i915_gem_drain_workqueue(dev_priv);
 
-	if (ret != -EIO)
-		intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
+	if (ret != -EIO) {
+		for_each_gt(gt, dev_priv, i) {
+			intel_gt_driver_remove(gt);
+			intel_gt_driver_release(gt);
+			intel_uc_cleanup_firmwares(&gt->uc);
+		}
+	}
 
 	if (ret == -EIO) {
 		/*
@@ -1182,10 +1205,12 @@ err_unlock:
 		 * as wedged. But we only want to do this when the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		if (!intel_gt_is_wedged(to_gt(dev_priv))) {
-			i915_probe_error(dev_priv,
-					 "Failed to initialize GPU, declaring it wedged!\n");
-			intel_gt_set_wedged(to_gt(dev_priv));
+		for_each_gt(gt, dev_priv, i) {
+			if (!intel_gt_is_wedged(gt)) {
+				i915_probe_error(dev_priv,
+						 "Failed to initialize GPU, declaring it wedged!\n");
+				intel_gt_set_wedged(gt);
+			}
 		}
 
 		/* Minimal basic recovery for KMS */
@@ -1213,23 +1238,27 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
 
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 {
-	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
+	struct intel_gt *gt;
+	unsigned int i;
 
 	i915_gem_suspend_late(dev_priv);
-	intel_gt_driver_remove(to_gt(dev_priv));
+	for_each_gt(gt, dev_priv, i)
+		intel_gt_driver_remove(gt);
 	dev_priv->uabi_engines = RB_ROOT;
 
 	/* Flush any outstanding unpin_work. */
 	i915_gem_drain_workqueue(dev_priv);
-
-	i915_gem_drain_freed_objects(dev_priv);
 }
 
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
-	intel_gt_driver_release(to_gt(dev_priv));
+	struct intel_gt *gt;
+	unsigned int i;
 
-	intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
+	for_each_gt(gt, dev_priv, i) {
+		intel_gt_driver_release(gt);
+		intel_uc_cleanup_firmwares(&gt->uc);
+	}
 
 	/* Flush any outstanding work, including i915_gem_context.release_work. */
 	i915_gem_drain_workqueue(dev_priv);
@@ -1259,7 +1288,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
 
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 {
-	i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_workqueue(dev_priv);
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
 	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
@@ -1271,7 +1300,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 	struct i915_drm_client *client;
 	int ret = -ENOMEM;
 
-	DRM_DEBUG("\n");
+	drm_dbg(&i915->drm, "\n");
 
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv)
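
Note: the overflows_type() checks added in i915_gem_gtt_pread() and i915_gem_gtt_pwrite_fast() guard against the u64 userspace arguments being silently truncated when assigned to the new unsigned long locals on 32-bit kernels. The real macro is fully type-generic and lives in <linux/overflow.h>; below is a minimal userspace sketch of just the u64 -> unsigned long case used here. u64_overflows_ulong() and check_user_args() are hypothetical illustration helpers, not kernel API.

/*
 * Simplified userspace sketch of the overflows_type() guard above.
 * Only models the u64 -> unsigned long case; the kernel macro is
 * type-generic.
 */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* True if a u64 value cannot be represented by an unsigned long. */
static int u64_overflows_ulong(uint64_t v)
{
	return v > (uint64_t)ULONG_MAX;
}

/* Mirrors the early -EINVAL return added to the GTT pread/pwrite paths. */
static int check_user_args(uint64_t size, uint64_t offset)
{
	if (u64_overflows_ulong(size) || u64_overflows_ulong(offset))
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* Rejected on 32-bit (ULONG_MAX is 2^32 - 1), accepted on LP64. */
	printf("%d\n", check_user_args(1ULL << 33, 0));
	return 0;
}

On an LP64 build ULONG_MAX equals UINT64_MAX, so the check can never fire; on 32-bit it rejects any size or offset that would otherwise be truncated.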