Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d45be0..6a29784d2b41 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		break;
 	}
 
+	/* When byt can survive without system hang with dynamic
+	 * sw freq adjustments, this restriction can be lifted.
+	 */
+	if (IS_VALLEYVIEW(dev_priv))
+		goto skip_hw_write;
+
 	I915_WRITE(GEN6_RP_UP_EI,
 		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
+skip_hw_write:
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
 	dev_priv->rps.down_threshold = threshold_down;
@@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
 	u32 mask = 0;
 
+	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
 	if (val > dev_priv->rps.min_freq_softlimit)
-		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
 	if (val < dev_priv->rps.max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
@@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
 			gen6_rps_reset_ei(dev_priv);
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
  * @timeout_base_ms: timeout for polling with preemption enabled
  *
  * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
  * The request is acknowledged once the PCODE reply dword equals @reply after
  * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
  * preemption disabled.
  *
  * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 	 * worst case) _and_ PCODE was busy for some reason even after a
 	 * (queued) request and @timeout_base_ms delay. As a workaround retry
 	 * the poll with preemption disabled to maximize the number of
-	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
 	 * account for interrupts that could reduce the number of these
-	 * requests.
+	 * requests, and for any quirks of the PCODE firmware that delay
+	 * the request completion.
 	 */
 	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
 	WARN_ON_ONCE(timeout_base_ms > 3);
 	preempt_disable();
-	ret = wait_for_atomic(COND, 10);
+	ret = wait_for_atomic(COND, 50);
 	preempt_enable();
 out:
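
The last two hunks tune the second phase of skl_pcode_request()'s two-step poll: a preemptible wait bounded by @timeout_base_ms, then an atomic retry whose bound this patch raises from 10 ms to 50 ms. A condensed sketch of that control flow follows; it assumes the _wait_for()/wait_for_atomic() helpers from i915_drv.h of this period and elides the variable declarations and error reporting of the real function, so read it as an illustration rather than the verbatim upstream body.

	/* Condensed sketch, not the verbatim function: COND resends the
	 * request via skl_pcode_try_request() and tests the masked reply
	 * dword against @reply.
	 */
	#define COND skl_pcode_try_request(dev_priv, mbox, request, \
					   reply_mask, reply, &status)

	/* Phase 1: send one request up front, then poll preemptibly for
	 * @timeout_base_ms, re-checking roughly every 10 us.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
	if (!ret)
		goto out;

	/* Phase 2: retry atomically. The 50 ms bound (raised from 10 ms
	 * by this patch) caps the overall wait at @timeout_base_ms + 50 ms
	 * while absorbing slow PCODE firmware responses.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

	out:
	#undef COND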