Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b99490e8367..ed6117a0ee84 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -151,7 +151,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 static int
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+	struct address_space *mapping = obj->base.filp->f_mapping;
 	char *vaddr = obj->phys_handle->vaddr;
 	struct sg_table *st;
 	struct scatterlist *sg;
@@ -218,7 +218,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 		obj->dirty = 0;
 
 	if (obj->dirty) {
-		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;
 
@@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	__i915_gem_request_retire__upto(req);
+	/* If the GPU hung, we want to keep the requests to find the guilty. */
+	if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+		__i915_gem_request_retire__upto(req);
+
 	return 0;
 }
 
@@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 	else if (obj->last_write_req == req)
 		i915_gem_object_retire__write(obj);
 
-	__i915_gem_request_retire__upto(req);
+	if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+		__i915_gem_request_retire__upto(req);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -2151,7 +2155,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 	if (obj->base.filp == NULL)
 		return;
 
-	mapping = file_inode(obj->base.filp)->i_mapping,
+	mapping = obj->base.filp->f_mapping,
 	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }
 
@@ -2267,7 +2271,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 *
 	 * Fail silently without starting the shrinker
 	 */
-	mapping = file_inode(obj->base.filp)->i_mapping;
+	mapping = obj->base.filp->f_mapping;
 	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
 	sg = st->sgl;
@@ -4518,7 +4522,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 		mask |= __GFP_DMA32;
 	}
 
-	mapping = file_inode(obj->base.filp)->i_mapping;
+	mapping = obj->base.filp->f_mapping;
 	mapping_set_gfp_mask(mapping, mask);
 
 	i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev)
 	struct intel_engine_cs *engine;
 	int ret, j;
 
-	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
-		return -EIO;
-
 	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
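
Most of the hunks above are the same mechanical substitution: the address_space is taken from the f_mapping pointer cached on struct file instead of being looked up through file_inode()->i_mapping. For the shmem file backing a GEM object both expressions name the same mapping, so only the pointer chase through the inode goes away. Below is a minimal sketch of that pattern outside the i915 code; demo_set_gfp is an illustrative name, not a function from this patch.

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Illustrative helper, not from the patch: apply a GFP mask to the page
 * cache of a file-backed object, the way i915_gem_alloc_object() does for
 * its shmem backing store.
 */
static void demo_set_gfp(struct file *filp, gfp_t mask)
{
	/* Old form: file_inode(filp)->i_mapping -- go through the inode.      */
	/* New form: filp->f_mapping -- same mapping, cached on struct file.   */
	struct address_space *mapping = filp->f_mapping;

	mapping_set_gfp_mask(mapping, mask);
}

The i915_wait_request() and i915_gem_object_retire_request() hunks are behavioral rather than mechanical: per the comment added above, __i915_gem_request_retire__upto() is now called only while the request's reset_counter still matches i915_reset_counter(), so requests are kept around after a GPU hang and the guilty one can still be identified. The gen < 6 intel_enable_gtt() check in i915_gem_init_hw() is dropped without replacement in this diff.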