Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c | 220
1 file changed, 129 insertions, 91 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dcd8d7b42552..f1bb69377a35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
-			struct timespec *timeout,
+			s64 *timeout,
			struct drm_i915_file_private *file_priv)
 {
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
+	s64 before, now;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

-	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
+	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
@@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
-	getrawmonotonic(&before);
+	before = ktime_get_raw_ns();
	for (;;) {
		struct timer_list timer;

@@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			destroy_timer_on_stack(&timer);
		}
	}
-	getrawmonotonic(&now);
+	now = ktime_get_raw_ns();
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
@@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
-		struct timespec sleep_time = timespec_sub(now, before);
-		*timeout = timespec_sub(*timeout, sleep_time);
-		if (!timespec_valid(timeout)) /* i.e. negative time remains */
-			set_normalized_timespec(timeout, 0, 0);
+		s64 tres = *timeout - (now - before);
+
+		*timeout = tres < 0 ? 0 : tres;
	}

	return ret;
@@ -2161,8 +2160,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
@@ -2181,19 +2178,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;
-
-	if (obj->fenced_gpu_access) {
-		obj->last_fenced_seqno = seqno;
-
-		/* Bump MRU to take account of the delayed flush */
-		if (obj->fence_reg != I915_FENCE_REG_NONE) {
-			struct drm_i915_fence_reg *reg;
-
-			reg = &dev_priv->fence_regs[obj->fence_reg];
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-		}
-	}
 }

 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2229,7 +2213,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)

	obj->base.write_domain = 0;
	obj->last_fenced_seqno = 0;
-	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);
@@ -2327,10 +2310,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
 {
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf;
	u32 request_ring_position, request_start;
	int ret;

-	request_start = intel_ring_get_tail(ring->buffer);
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
+		return -ENOMEM;
+
+	if (i915.enable_execlists) {
+		struct intel_context *ctx = request->ctx;
+		ringbuf = ctx->engine[ring->id].ringbuf;
+	} else
+		ringbuf = ring->buffer;
+
+	request_start = intel_ring_get_tail(ringbuf);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2338,24 +2332,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
-	ret = intel_ring_flush_all_caches(ring);
-	if (ret)
-		return ret;
-
-	request = ring->preallocated_lazy_request;
-	if (WARN_ON(request == NULL))
-		return -ENOMEM;
+	if (i915.enable_execlists) {
+		ret = logical_ring_flush_all_caches(ringbuf);
+		if (ret)
+			return ret;
+	} else {
+		ret = intel_ring_flush_all_caches(ring);
+		if (ret)
+			return ret;
+	}

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
-	request_ring_position = intel_ring_get_tail(ring->buffer);
+	request_ring_position = intel_ring_get_tail(ringbuf);

-	ret = ring->add_request(ring);
-	if (ret)
-		return ret;
+	if (i915.enable_execlists) {
+		ret = ring->emit_request(ringbuf);
+		if (ret)
+			return ret;
+	} else {
+		ret = ring->add_request(ring);
+		if (ret)
+			return ret;
+	}

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
@@ -2370,12 +2372,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
	 */
	request->batch_obj = obj;

-	/* Hold a reference to the current context so that we can inspect
-	 * it later in case a hangcheck error event fires.
-	 */
-	request->ctx = ring->last_context;
-	if (request->ctx)
-		i915_gem_context_reference(request->ctx);
+	if (!i915.enable_execlists) {
+		/* Hold a reference to the current context so that we can inspect
+		 * it later in case a hangcheck error event fires.
+		 */
+		request->ctx = ring->last_context;
+		if (request->ctx)
+			i915_gem_context_reference(request->ctx);
+	}

	request->emitted_jiffies = jiffies;
	list_add_tail(&request->list, &ring->request_list);
@@ -2546,6 +2550,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
		i915_gem_free_request(request);
	}

+	while (!list_empty(&ring->execlist_queue)) {
+		struct intel_ctx_submit_request *submit_req;
+
+		submit_req = list_first_entry(&ring->execlist_queue,
+					      struct intel_ctx_submit_request,
+					      execlist_link);
+		list_del(&submit_req->execlist_link);
+		intel_runtime_pm_put(dev_priv);
+		i915_gem_context_unreference(submit_req->ctx);
+		kfree(submit_req);
+	}
+
	/* These may not have been flushed before the reset, do so now */
	kfree(ring->preallocated_lazy_request);
	ring->preallocated_lazy_request = NULL;
@@ -2630,6 +2646,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;
+		struct intel_ringbuffer *ringbuf;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
@@ -2639,12 +2656,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
+
+		/* This is one of the few common intersection points
+		 * between legacy ringbuffer submission and execlists:
+		 * we need to tell them apart in order to find the correct
+		 * ringbuffer to which the request belongs.
+		 */
+		if (i915.enable_execlists) {
+			struct intel_context *ctx = request->ctx;
+			ringbuf = ctx->engine[ring->id].ringbuf;
+		} else
+			ringbuf = ring->buffer;
+
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of tail of the request to update the last known position
		 * of the GPU head.
		 */
-		ring->buffer->last_retired_head = request->tail;
+		ringbuf->last_retired_head = request->tail;
		i915_gem_free_request(request);
	}

@@ -2757,16 +2786,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *ring = NULL;
-	struct timespec timeout_stack, *timeout = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret = 0;

-	if (args->timeout_ns >= 0) {
-		timeout_stack = ns_to_timespec(args->timeout_ns);
-		timeout = &timeout_stack;
-	}
-
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;
@@ -2791,9 +2814,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
		goto out;

	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a 0 timeout (like busy ioctl)
+	 * on this IOCTL with a timeout <=0 (like busy ioctl)
	 */
-	if (!args->timeout_ns) {
+	if (args->timeout_ns <= 0) {
		ret = -ETIME;
		goto out;
	}
@@ -2802,10 +2825,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
-	if (timeout)
-		args->timeout_ns = timespec_to_ns(timeout);
-	return ret;
+	return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+			    file->driver_priv);

 out:
	drm_gem_object_unreference(&obj->base);
@@ -2928,9 +2949,8 @@ int i915_vma_unbind(struct i915_vma *vma)
	vma->unbind_vma(vma);

	list_del_init(&vma->mm_list);
-	/* Avoid an unnecessary call to unbind on rebind. */
	if (i915_is_ggtt(vma->vm))
-		obj->map_and_fenceable = true;
+		obj->map_and_fenceable = false;

	drm_mm_remove_node(&vma->node);
	i915_gem_vma_destroy(vma);
@@ -3175,7 +3195,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
		obj->last_fenced_seqno = 0;
	}

-	obj->fenced_gpu_access = false;
	return 0;
 }

@@ -3282,6 +3301,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
			return 0;
		}
	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
		reg = i915_find_fence_reg(dev);
		if (IS_ERR(reg))
			return PTR_ERR(reg);
@@ -3592,11 +3614,12 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
-	if (!i915_gem_obj_bound_any(obj))
+	if (vma == NULL)
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3638,13 +3661,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
					    old_write_domain);

	/* And bump the LRU for this access */
-	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
-		if (vma)
-			list_move_tail(&vma->mm_list,
-				       &dev_priv->gtt.base.inactive_list);
-
-	}
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&vma->mm_list,
+			       &dev_priv->gtt.base.inactive_list);

	return 0;
 }
@@ -3808,9 +3827,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
	struct i915_vma *vma;

-	if (list_empty(&obj->vma_list))
-		return false;
-
	vma = i915_gem_obj_to_ggtt(obj);
	if (!vma)
		return false;
@@ -4337,8 +4353,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
@@ -4499,12 +4513,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,

 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
+	struct i915_address_space *vm = NULL;
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

+	vm = vma->vm;
+
+	if (!i915_is_ggtt(vm))
+		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
	list_del(&vma->vma_link);

	kfree(vma);
@@ -4518,7 +4538,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
	int i;

	for_each_ring(ring, dev_priv, i)
-		intel_stop_ring_buffer(ring);
+		dev_priv->gt.stop_ring(ring);
 }

 int
@@ -4554,7 +4574,7 @@ i915_gem_suspend(struct drm_device *dev)

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+	flush_delayed_work(&dev_priv->mm.idle_work);

	return 0;

@@ -4635,7 +4655,7 @@ intel_enable_blt(struct drm_device *dev)
	return true;
 }

-static int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_rings(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
@@ -4718,7 +4738,7 @@ i915_gem_init_hw(struct drm_device *dev)

	i915_gem_init_swizzling(dev);

-	ret = i915_gem_init_rings(dev);
+	ret = dev_priv->gt.init_rings(dev);
	if (ret)
		return ret;

@@ -4736,6 +4756,14 @@ i915_gem_init_hw(struct drm_device *dev)
	if (ret && ret != -EIO) {
		DRM_ERROR("Context enable failed %d\n", ret);
		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
+	}
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret && ret != -EIO) {
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		i915_gem_cleanup_ringbuffer(dev);
	}

	return ret;
@@ -4746,6 +4774,9 @@ int i915_gem_init(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

+	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+			i915.enable_execlists);
+
	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
@@ -4756,7 +4787,24 @@ int i915_gem_init(struct drm_device *dev)
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

-	i915_gem_init_userptr(dev);
+	if (!i915.enable_execlists) {
+		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+		dev_priv->gt.init_rings = i915_gem_init_rings;
+		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+	} else {
+		dev_priv->gt.do_execbuf = intel_execlists_submission;
+		dev_priv->gt.init_rings = intel_logical_rings_init;
+		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+	}
+
+	ret = i915_gem_init_userptr(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
	i915_gem_init_global_gtt(dev);

	ret = i915_gem_context_init(dev);
@@ -4791,7 +4839,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
	int i;

	for_each_ring(ring, dev_priv, i)
-		intel_cleanup_ring_buffer(ring);
+		dev_priv->gt.cleanup_ring(ring);
 }

 int
@@ -5103,9 +5151,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
@@ -5146,9 +5192,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	BUG_ON(list_empty(&o->vma_list));

@@ -5253,14 +5297,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
	struct i915_vma *vma;

-	/* This WARN has probably outlived its usefulness (callers already
-	 * WARN if they don't find the GGTT vma they expect). When removing,
-	 * remember to remove the pre-check in is_pin_display() as well */
-	if (WARN_ON(list_empty(&obj->vma_list)))
-		return NULL;
-
	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-	if (vma->vm != obj_to_ggtt(obj))
+	if (vma->vm != i915_obj_to_ggtt(obj))
		return NULL;

	return vma;
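
The __wait_seqno()/i915_gem_wait_ioctl() hunks above drop the struct timespec plumbing in favour of a single s64 nanosecond budget that the waiter decrements in place, which is why the ioctl can now hand &args->timeout_ns straight to __wait_seqno() and why a timeout <= 0 is rejected up front with -ETIME. Below is a minimal userspace sketch of that bookkeeping, using CLOCK_MONOTONIC_RAW as a stand-in for ktime_get_raw_ns(); all names in it are illustrative, not the driver's.

	/* Sketch (not driver code): measure the wait with a raw monotonic
	 * clock, subtract the elapsed time from the caller's budget in
	 * place, and clamp at zero so a negative remainder is never
	 * reported back. */
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static int64_t raw_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
		return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	/* Waits (here: a stub) and writes the unconsumed budget back,
	 * like __wait_seqno() does through its s64 *timeout argument. */
	static void wait_with_budget(int64_t *timeout_ns)
	{
		int64_t before, now, tres;

		before = raw_ns();
		/* ... block until the event arrives, honouring the budget ... */
		now = raw_ns();

		tres = *timeout_ns - (now - before);
		*timeout_ns = tres < 0 ? 0 : tres;
	}

	int main(void)
	{
		int64_t budget = 5 * 1000000LL;	/* 5 ms, like args->timeout_ns */

		wait_with_budget(&budget);
		printf("remaining budget: %lld ns\n", (long long)budget);
		return 0;
	}

The clamp matters because the remainder is copied back to userspace: in this ioctl's ABI a negative timeout_ns requests an unbounded wait, so returning a negative remainder would turn a timed wait into an infinite one on retry.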
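Several of the hunks (i915_gem_stop_ringbuffers(), i915_gem_init_hw(), i915_gem_cleanup_ringbuffer()) stop hard-coding the legacy ring functions and instead call through dev_priv->gt, a function-pointer table that i915_gem_init() fills exactly once after sanitizing i915.enable_execlists. A simplified, self-contained sketch of that dispatch pattern follows; the types and stub backends are stand-ins, not the driver's real ones.

	#include <stdio.h>

	struct engine { const char *name; };

	/* Function-pointer table mirroring dev_priv->gt: picked once at
	 * init time, called through everywhere else. */
	struct gt_ops {
		int  (*init_rings)(struct engine *e);
		void (*stop_ring)(struct engine *e);
		void (*cleanup_ring)(struct engine *e);
	};

	/* Stub legacy-ringbuffer backend (stands in for intel_*_ring_buffer). */
	static int  legacy_init(struct engine *e)    { printf("legacy init %s\n", e->name); return 0; }
	static void legacy_stop(struct engine *e)    { printf("legacy stop %s\n", e->name); }
	static void legacy_cleanup(struct engine *e) { printf("legacy cleanup %s\n", e->name); }

	/* Stub execlists backend (stands in for intel_logical_ring_*). */
	static int  lrc_init(struct engine *e)    { printf("lrc init %s\n", e->name); return 0; }
	static void lrc_stop(struct engine *e)    { printf("lrc stop %s\n", e->name); }
	static void lrc_cleanup(struct engine *e) { printf("lrc cleanup %s\n", e->name); }

	/* The one place that knows about both backends, like i915_gem_init(). */
	static void pick_submission_backend(struct gt_ops *gt, int enable_execlists)
	{
		if (!enable_execlists) {
			gt->init_rings   = legacy_init;
			gt->stop_ring    = legacy_stop;
			gt->cleanup_ring = legacy_cleanup;
		} else {
			gt->init_rings   = lrc_init;
			gt->stop_ring    = lrc_stop;
			gt->cleanup_ring = lrc_cleanup;
		}
	}

	int main(void)
	{
		struct gt_ops gt;
		struct engine rcs = { "rcs" };

		pick_submission_backend(&gt, 1 /* i915.enable_execlists */);
		gt.init_rings(&rcs);	/* callers never test the flag again */
		gt.stop_ring(&rcs);
		gt.cleanup_ring(&rcs);
		return 0;
	}

The payoff mirrors the diff: every later caller is backend-agnostic, and the only remaining i915.enable_execlists tests sit in paths (request emission, retirement) that must locate the per-context logical ringbuffer rather than ring->buffer.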