Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 213
1 file changed, 176 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd86..1d5d613eb6be 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -318,6 +318,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 		 * TLB invalidate requires a post-sync write.
 		 */
 		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
@@ -331,7 +332,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
 
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
 	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, scratch_addr);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
@@ -467,6 +468,9 @@ init_pipe_control(struct intel_ring_buffer *ring)
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
 
+	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+			 ring->name, pc->gtt_offset);
+
 	pc->obj = obj;
 	ring->private = pc;
 	return 0;
@@ -505,13 +509,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
 
-	if (INTEL_INFO(dev)->gen > 3) {
+	if (INTEL_INFO(dev)->gen > 3)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-		if (IS_GEN7(dev))
-			I915_WRITE(GFX_MODE_GEN7,
-				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-	}
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 */
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	/* Required for the hardware to program scanline values for waiting */
+	if (INTEL_INFO(dev)->gen == 6)
+		I915_WRITE(GFX_MODE,
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+	if (IS_GEN7(dev))
+		I915_WRITE(GFX_MODE_GEN7,
+			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
 	if (INTEL_INFO(dev)->gen >= 5) {
 		ret = init_pipe_control(ring);
@@ -547,9 +563,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
+
 	if (!ring->private)
 		return;
 
+	if (HAS_BROKEN_CS_TLB(dev))
+		drm_gem_object_unreference(to_gem_object(ring->private));
+
 	cleanup_pipe_control(ring);
 }
 
@@ -596,6 +617,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
 	return 0;
 }
 
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+					      u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->last_seqno < seqno;
+}
+
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -626,11 +654,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter,
-			dw1 | signaller->semaphore_register[waiter->id]);
-	intel_ring_emit(waiter, seqno);
-	intel_ring_emit(waiter, 0);
-	intel_ring_emit(waiter, MI_NOOP);
+	/* If seqno wrap happened, omit the wait with no-ops */
+	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+		intel_ring_emit(waiter,
+				dw1 |
+				signaller->semaphore_register[waiter->id]);
+		intel_ring_emit(waiter, seqno);
+		intel_ring_emit(waiter, 0);
+		intel_ring_emit(waiter, MI_NOOP);
+	} else {
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+	}
 	intel_ring_advance(waiter);
 
 	return 0;
@@ -711,6 +748,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
@@ -718,6 +761,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return pc->cpu_page[0];
 }
 
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct pipe_control *pc = ring->private;
+	pc->cpu_page[0] = seqno;
+}
+
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -969,6 +1019,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 				u32 offset, u32 len,
@@ -976,15 +1028,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
-	if (ret)
-		return ret;
+	if (flags & I915_DISPATCH_PINNED) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-	intel_ring_emit(ring, offset + len - 8);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	} else {
+		struct drm_i915_gem_object *obj = ring->private;
+		u32 cs_offset = obj->gtt_offset;
+
+		if (len > I830_BATCH_LIMIT)
+			return -ENOSPC;
+
+		ret = intel_ring_begin(ring, 9+3);
+		if (ret)
+			return ret;
+		/* Blit the batch (which has now all relocs applied) to the stable batch
+		 * scratch bo area (so that the CS never stumbles over its tlb
+		 * invalidation bug) ... */
+		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+		intel_ring_emit(ring, MI_FLUSH);
+
+		/* ... and execute it. */
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, cs_offset + len - 8);
+		intel_ring_advance(ring);
+	}
 
 	return 0;
 }
@@ -1113,7 +1197,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			return ret;
 	}
 
-	obj = i915_gem_alloc_object(dev, ring->size);
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ring->size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, ring->size);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		ret = -ENOMEM;
@@ -1131,7 +1219,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_unpin;
 
 	ring->virtual_start =
-		ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
 			   ring->size);
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1297,7 +1385,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 
 		msleep(1);
 
-		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+					   dev_priv->mm.interruptible);
 		if (ret)
 			return ret;
 	} while (!time_after(jiffies, end));
@@ -1359,14 +1448,35 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+			      int bytes)
+{
+	int ret;
+
+	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < bytes)) {
+		ret = ring_wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	ring->space -= bytes;
+	return 0;
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int n = 4*num_dwords;
 	int ret;
 
-	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
@@ -1375,20 +1485,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	if (unlikely(ring->tail + n > ring->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
-	}
+	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
 
-	if (unlikely(ring->space < n)) {
-		ret = ring_wait_for_space(ring, n);
-		if (unlikely(ret))
-			return ret;
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
 	}
 
-	ring->space -= n;
-	return 0;
+	ring->set_seqno(ring, seqno);
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1396,7 +1507,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	ring->tail &= ring->size - 1;
-	if (dev_priv->stop_rings & intel_ring_flag(ring))
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
 		return;
 	ring->write_tail(ring, ring->tail);
 }
@@ -1553,6 +1664,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1563,6 +1675,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1573,6 +1686,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN2(dev)) {
 			ring->irq_get = i8xx_ring_get_irq;
 			ring->irq_put = i8xx_ring_put_irq;
@@ -1596,6 +1710,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
+	/* Workaround batchbuffer to combat CS tlb bug. */
+	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct drm_i915_gem_object *obj;
+		int ret;
+
+		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate batch bo\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_object_pin(obj, 0, true, false);
+		if (ret != 0) {
+			drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to ping batch bo\n");
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
 	return intel_init_ring_buffer(dev, ring);
 }
 
@@ -1623,6 +1758,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	else
 		ring->flush = gen4_render_ring_flush;
 	ring->get_seqno = ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	if (IS_GEN2(dev)) {
 		ring->irq_get = i8xx_ring_get_irq;
 		ring->irq_put = i8xx_ring_put_irq;
@@ -1683,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
@@ -1698,6 +1835,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = i9xx_add_request;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen5_ring_get_irq;
@@ -1727,6 +1865,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->flush = blt_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
 	ring->irq_get = gen6_ring_get_irq;
 	ring->irq_put = gen6_ring_put_irq;