Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 839
1 file changed, 645 insertions(+), 194 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index eadc15cddbeb..ddbb7ed0a193 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -59,6 +59,10 @@ static void gen9_init_clock_gating(struct drm_device *dev)  	/* WaEnableLbsSlaRetryTimerDecrement:skl */  	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |  		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); + +	/* WaDisableKillLogic:bxt,skl */ +	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | +		   ECOCHK_DIS_TLB);  }  static void skl_init_clock_gating(struct drm_device *dev) @@ -91,10 +95,19 @@ static void skl_init_clock_gating(struct drm_device *dev)  			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));  	} +	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes +	 * involving this register should also be added to WA batch as required. +	 */  	if (INTEL_REVID(dev) <= SKL_REVID_E0)  		/* WaDisableLSQCROPERFforOCL:skl */  		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |  			   GEN8_LQSC_RO_PERF_DIS); + +	/* WaEnableGapsTsvCreditFix:skl */ +	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { +		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | +					   GEN9_GAPS_TSV_CREDIT_DISABLE)); +	}  }  static void bxt_init_clock_gating(struct drm_device *dev) @@ -334,22 +347,26 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)  	if (IS_VALLEYVIEW(dev)) {  		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); -		if (IS_CHERRYVIEW(dev)) -			chv_set_memory_pm5(dev_priv, enable); +		POSTING_READ(FW_BLC_SELF_VLV); +		dev_priv->wm.vlv.cxsr = enable;  	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {  		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); +		POSTING_READ(FW_BLC_SELF);  	} else if (IS_PINEVIEW(dev)) {  		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;  		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;  		I915_WRITE(DSPFW3, val); +		POSTING_READ(DSPFW3);  	} else if (IS_I945G(dev) || IS_I945GM(dev)) {  		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :  			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);  		I915_WRITE(FW_BLC_SELF, val); +		POSTING_READ(FW_BLC_SELF);  	} else if (IS_I915GM(dev)) {  		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :  			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);  		I915_WRITE(INSTPM, val); +		POSTING_READ(INSTPM);  	} else {  		return;  	} @@ -923,223 +940,480 @@ static void vlv_write_wm_values(struct intel_crtc *crtc,  			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));  	} -	POSTING_READ(DSPFW1); +	/* zero (unused) WM1 watermarks */ +	I915_WRITE(DSPFW4, 0); +	I915_WRITE(DSPFW5, 0); +	I915_WRITE(DSPFW6, 0); +	I915_WRITE(DSPHOWM1, 0); -	dev_priv->wm.vlv = *wm; +	POSTING_READ(DSPFW1);  }  #undef FW_WM_VLV -static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc, -					 struct drm_plane *plane) +enum vlv_wm_level { +	VLV_WM_LEVEL_PM2, +	VLV_WM_LEVEL_PM5, +	VLV_WM_LEVEL_DDR_DVFS, +}; + +/* latency must be in 0.1us units. */ +static unsigned int vlv_wm_method2(unsigned int pixel_rate, +				   unsigned int pipe_htotal, +				   unsigned int horiz_pixels, +				   unsigned int bytes_per_pixel, +				   unsigned int latency)  { -	struct drm_device *dev = crtc->dev; -	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); -	int entries, prec_mult, drain_latency, pixel_size; -	int clock = intel_crtc->config->base.adjusted_mode.crtc_clock; -	const int high_precision = IS_CHERRYVIEW(dev) ? 
16 : 64; +	unsigned int ret; -	/* -	 * FIXME the plane might have an fb -	 * but be invisible (eg. due to clipping) -	 */ -	if (!intel_crtc->active || !plane->state->fb) -		return 0; +	ret = (latency * pixel_rate) / (pipe_htotal * 10000); +	ret = (ret + 1) * horiz_pixels * bytes_per_pixel; +	ret = DIV_ROUND_UP(ret, 64); -	if (WARN(clock == 0, "Pixel clock is zero!\n")) -		return 0; +	return ret; +} -	pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0); +static void vlv_setup_wm_latency(struct drm_device *dev) +{ +	struct drm_i915_private *dev_priv = dev->dev_private; -	if (WARN(pixel_size == 0, "Pixel size is zero!\n")) -		return 0; +	/* all latencies in usec */ +	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; -	entries = DIV_ROUND_UP(clock, 1000) * pixel_size; +	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; -	prec_mult = high_precision; -	drain_latency = 64 * prec_mult * 4 / entries; +	if (IS_CHERRYVIEW(dev_priv)) { +		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; +		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; -	if (drain_latency > DRAIN_LATENCY_MASK) { -		prec_mult /= 2; -		drain_latency = 64 * prec_mult * 4 / entries; +		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;  	} - -	if (drain_latency > DRAIN_LATENCY_MASK) -		drain_latency = DRAIN_LATENCY_MASK; - -	return drain_latency | (prec_mult == high_precision ? -				DDL_PRECISION_HIGH : DDL_PRECISION_LOW);  } -static int vlv_compute_wm(struct intel_crtc *crtc, -			  struct intel_plane *plane, -			  int fifo_size) +static uint16_t vlv_compute_wm_level(struct intel_plane *plane, +				     struct intel_crtc *crtc, +				     const struct intel_plane_state *state, +				     int level)  { -	int clock, entries, pixel_size; +	struct drm_i915_private *dev_priv = to_i915(plane->base.dev); +	int clock, htotal, pixel_size, width, wm; -	/* -	 * FIXME the plane might have an fb -	 * but be invisible (eg. due to clipping) -	 */ -	if (!crtc->active || !plane->base.state->fb) +	if (dev_priv->wm.pri_latency[level] == 0) +		return USHRT_MAX; + +	if (!state->visible)  		return 0; -	pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0); +	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);  	clock = crtc->config->base.adjusted_mode.crtc_clock; +	htotal = crtc->config->base.adjusted_mode.crtc_htotal; +	width = crtc->config->pipe_src_w; +	if (WARN_ON(htotal == 0)) +		htotal = 1; -	entries = DIV_ROUND_UP(clock, 1000) * pixel_size; +	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { +		/* +		 * FIXME the formula gives values that are +		 * too big for the cursor FIFO, and hence we +		 * would never be able to use cursors. For +		 * now just hardcode the watermark. +		 */ +		wm = 63; +	} else { +		wm = vlv_wm_method2(clock, htotal, width, pixel_size, +				    dev_priv->wm.pri_latency[level] * 10); +	} -	/* -	 * Set up the watermark such that we don't start issuing memory -	 * requests until we are within PND's max deadline value (256us). -	 * Idea being to be idle as long as possible while still taking -	 * advatange of PND's deadline scheduling. The limit of 8 -	 * cachelines (used when the FIFO will anyway drain in less time -	 * than 256us) should match what we would be done if trickle -	 * feed were enabled. 
-	 */ -	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8); +	return min_t(int, wm, USHRT_MAX);  } -static bool vlv_compute_sr_wm(struct drm_device *dev, -			      struct vlv_wm_values *wm) +static void vlv_compute_fifo(struct intel_crtc *crtc)  { -	struct drm_i915_private *dev_priv = to_i915(dev); -	struct drm_crtc *crtc; -	enum pipe pipe = INVALID_PIPE; -	int num_planes = 0; -	int fifo_size = 0; +	struct drm_device *dev = crtc->base.dev; +	struct vlv_wm_state *wm_state = &crtc->wm_state;  	struct intel_plane *plane; +	unsigned int total_rate = 0; +	const int fifo_size = 512 - 1; +	int fifo_extra, fifo_left = fifo_size; -	wm->sr.cursor = wm->sr.plane = 0; +	for_each_intel_plane_on_crtc(dev, crtc, plane) { +		struct intel_plane_state *state = +			to_intel_plane_state(plane->base.state); -	crtc = single_enabled_crtc(dev); -	/* maxfifo not supported on pipe C */ -	if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) { -		pipe = to_intel_crtc(crtc)->pipe; -		num_planes = !!wm->pipe[pipe].primary + -			!!wm->pipe[pipe].sprite[0] + -			!!wm->pipe[pipe].sprite[1]; -		fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; +		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) +			continue; + +		if (state->visible) { +			wm_state->num_active_planes++; +			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0); +		}  	} -	if (fifo_size == 0 || num_planes > 1) -		return false; +	for_each_intel_plane_on_crtc(dev, crtc, plane) { +		struct intel_plane_state *state = +			to_intel_plane_state(plane->base.state); +		unsigned int rate; + +		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { +			plane->wm.fifo_size = 63; +			continue; +		} + +		if (!state->visible) { +			plane->wm.fifo_size = 0; +			continue; +		} + +		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0); +		plane->wm.fifo_size = fifo_size * rate / total_rate; +		fifo_left -= plane->wm.fifo_size; +	} -	wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc), -				       to_intel_plane(crtc->cursor), 0x3f); +	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1); + +	/* spread the remainder evenly */ +	for_each_intel_plane_on_crtc(dev, crtc, plane) { +		int plane_extra; + +		if (fifo_left == 0) +			break; -	list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {  		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)  			continue; -		if (plane->pipe != pipe) +		/* give it all to the first plane if none are active */ +		if (plane->wm.fifo_size == 0 && +		    wm_state->num_active_planes) +			continue; + +		plane_extra = min(fifo_extra, fifo_left); +		plane->wm.fifo_size += plane_extra; +		fifo_left -= plane_extra; +	} + +	WARN_ON(fifo_left != 0); +} + +static void vlv_invert_wms(struct intel_crtc *crtc) +{ +	struct vlv_wm_state *wm_state = &crtc->wm_state; +	int level; + +	for (level = 0; level < wm_state->num_levels; level++) { +		struct drm_device *dev = crtc->base.dev; +		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; +		struct intel_plane *plane; + +		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane; +		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor; + +		for_each_intel_plane_on_crtc(dev, crtc, plane) { +			switch (plane->base.type) { +				int sprite; +			case DRM_PLANE_TYPE_CURSOR: +				wm_state->wm[level].cursor = plane->wm.fifo_size - +					wm_state->wm[level].cursor; +				break; +			case DRM_PLANE_TYPE_PRIMARY: +				wm_state->wm[level].primary = plane->wm.fifo_size - +					wm_state->wm[level].primary; +				break; +			case 
DRM_PLANE_TYPE_OVERLAY: +				sprite = plane->plane; +				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size - +					wm_state->wm[level].sprite[sprite]; +				break; +			} +		} +	} +} + +static void vlv_compute_wm(struct intel_crtc *crtc) +{ +	struct drm_device *dev = crtc->base.dev; +	struct vlv_wm_state *wm_state = &crtc->wm_state; +	struct intel_plane *plane; +	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1; +	int level; + +	memset(wm_state, 0, sizeof(*wm_state)); + +	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed; +	wm_state->num_levels = to_i915(dev)->wm.max_level + 1; + +	wm_state->num_active_planes = 0; + +	vlv_compute_fifo(crtc); + +	if (wm_state->num_active_planes != 1) +		wm_state->cxsr = false; + +	if (wm_state->cxsr) { +		for (level = 0; level < wm_state->num_levels; level++) { +			wm_state->sr[level].plane = sr_fifo_size; +			wm_state->sr[level].cursor = 63; +		} +	} + +	for_each_intel_plane_on_crtc(dev, crtc, plane) { +		struct intel_plane_state *state = +			to_intel_plane_state(plane->base.state); + +		if (!state->visible) +			continue; + +		/* normal watermarks */ +		for (level = 0; level < wm_state->num_levels; level++) { +			int wm = vlv_compute_wm_level(plane, crtc, state, level); +			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511; + +			/* hack */ +			if (WARN_ON(level == 0 && wm > max_wm)) +				wm = max_wm; + +			if (wm > plane->wm.fifo_size) +				break; + +			switch (plane->base.type) { +				int sprite; +			case DRM_PLANE_TYPE_CURSOR: +				wm_state->wm[level].cursor = wm; +				break; +			case DRM_PLANE_TYPE_PRIMARY: +				wm_state->wm[level].primary = wm; +				break; +			case DRM_PLANE_TYPE_OVERLAY: +				sprite = plane->plane; +				wm_state->wm[level].sprite[sprite] = wm; +				break; +			} +		} + +		wm_state->num_levels = level; + +		if (!wm_state->cxsr)  			continue; -		wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc), -					      plane, fifo_size); -		if (wm->sr.plane != 0) +		/* maxfifo watermarks */ +		switch (plane->base.type) { +			int sprite, level; +		case DRM_PLANE_TYPE_CURSOR: +			for (level = 0; level < wm_state->num_levels; level++) +				wm_state->sr[level].cursor = +					wm_state->sr[level].cursor; +			break; +		case DRM_PLANE_TYPE_PRIMARY: +			for (level = 0; level < wm_state->num_levels; level++) +				wm_state->sr[level].plane = +					min(wm_state->sr[level].plane, +					    wm_state->wm[level].primary);  			break; +		case DRM_PLANE_TYPE_OVERLAY: +			sprite = plane->plane; +			for (level = 0; level < wm_state->num_levels; level++) +				wm_state->sr[level].plane = +					min(wm_state->sr[level].plane, +					    wm_state->wm[level].sprite[sprite]); +			break; +		}  	} -	return true; +	/* clear any (partially) filled invalid levels */ +	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) { +		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level])); +		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level])); +	} + +	vlv_invert_wms(crtc);  } -static void valleyview_update_wm(struct drm_crtc *crtc) +#define VLV_FIFO(plane, value) \ +	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) + +static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)  { -	struct drm_device *dev = crtc->dev; -	struct drm_i915_private *dev_priv = dev->dev_private; -	struct intel_crtc *intel_crtc = to_intel_crtc(crtc); -	enum pipe pipe = intel_crtc->pipe; -	bool cxsr_enabled; -	struct vlv_wm_values wm = dev_priv->wm.vlv; +	struct drm_device *dev = crtc->base.dev; +	
struct drm_i915_private *dev_priv = to_i915(dev); +	struct intel_plane *plane; +	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0; -	wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary); -	wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc, -					       to_intel_plane(crtc->primary), -					       vlv_get_fifo_size(dev, pipe, 0)); +	for_each_intel_plane_on_crtc(dev, crtc, plane) { +		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) { +			WARN_ON(plane->wm.fifo_size != 63); +			continue; +		} -	wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor); -	wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc, -					      to_intel_plane(crtc->cursor), -					      0x3f); +		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) +			sprite0_start = plane->wm.fifo_size; +		else if (plane->plane == 0) +			sprite1_start = sprite0_start + plane->wm.fifo_size; +		else +			fifo_size = sprite1_start + plane->wm.fifo_size; +	} -	cxsr_enabled = vlv_compute_sr_wm(dev, &wm); +	WARN_ON(fifo_size != 512 - 1); -	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) -		return; +	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n", +		      pipe_name(crtc->pipe), sprite0_start, +		      sprite1_start, fifo_size); -	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " -		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe), -		      wm.pipe[pipe].primary, wm.pipe[pipe].cursor, -		      wm.sr.plane, wm.sr.cursor); +	switch (crtc->pipe) { +		uint32_t dsparb, dsparb2, dsparb3; +	case PIPE_A: +		dsparb = I915_READ(DSPARB); +		dsparb2 = I915_READ(DSPARB2); -	/* -	 * FIXME DDR DVFS introduces massive memory latencies which -	 * are not known to system agent so any deadline specified -	 * by the display may not be respected. To support DDR DVFS -	 * the watermark code needs to be rewritten to essentially -	 * bypass deadline mechanism and rely solely on the -	 * watermarks. For now disable DDR DVFS. 
-	 */ -	if (IS_CHERRYVIEW(dev_priv)) -		chv_set_memory_dvfs(dev_priv, false); +		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | +			    VLV_FIFO(SPRITEB, 0xff)); +		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | +			   VLV_FIFO(SPRITEB, sprite1_start)); -	if (!cxsr_enabled) -		intel_set_memory_cxsr(dev_priv, false); +		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | +			     VLV_FIFO(SPRITEB_HI, 0x1)); +		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | +			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); -	vlv_write_wm_values(intel_crtc, &wm); +		I915_WRITE(DSPARB, dsparb); +		I915_WRITE(DSPARB2, dsparb2); +		break; +	case PIPE_B: +		dsparb = I915_READ(DSPARB); +		dsparb2 = I915_READ(DSPARB2); -	if (cxsr_enabled) -		intel_set_memory_cxsr(dev_priv, true); +		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | +			    VLV_FIFO(SPRITED, 0xff)); +		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | +			   VLV_FIFO(SPRITED, sprite1_start)); + +		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | +			     VLV_FIFO(SPRITED_HI, 0xff)); +		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | +			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); + +		I915_WRITE(DSPARB, dsparb); +		I915_WRITE(DSPARB2, dsparb2); +		break; +	case PIPE_C: +		dsparb3 = I915_READ(DSPARB3); +		dsparb2 = I915_READ(DSPARB2); + +		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | +			     VLV_FIFO(SPRITEF, 0xff)); +		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | +			    VLV_FIFO(SPRITEF, sprite1_start)); + +		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | +			     VLV_FIFO(SPRITEF_HI, 0xff)); +		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | +			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); + +		I915_WRITE(DSPARB3, dsparb3); +		I915_WRITE(DSPARB2, dsparb2); +		break; +	default: +		break; +	}  } -static void valleyview_update_sprite_wm(struct drm_plane *plane, -					struct drm_crtc *crtc, -					uint32_t sprite_width, -					uint32_t sprite_height, -					int pixel_size, -					bool enabled, bool scaled) +#undef VLV_FIFO + +static void vlv_merge_wm(struct drm_device *dev, +			 struct vlv_wm_values *wm) +{ +	struct intel_crtc *crtc; +	int num_active_crtcs = 0; + +	wm->level = to_i915(dev)->wm.max_level; +	wm->cxsr = true; + +	for_each_intel_crtc(dev, crtc) { +		const struct vlv_wm_state *wm_state = &crtc->wm_state; + +		if (!crtc->active) +			continue; + +		if (!wm_state->cxsr) +			wm->cxsr = false; + +		num_active_crtcs++; +		wm->level = min_t(int, wm->level, wm_state->num_levels - 1); +	} + +	if (num_active_crtcs != 1) +		wm->cxsr = false; + +	if (num_active_crtcs > 1) +		wm->level = VLV_WM_LEVEL_PM2; + +	for_each_intel_crtc(dev, crtc) { +		struct vlv_wm_state *wm_state = &crtc->wm_state; +		enum pipe pipe = crtc->pipe; + +		if (!crtc->active) +			continue; + +		wm->pipe[pipe] = wm_state->wm[wm->level]; +		if (wm->cxsr) +			wm->sr = wm_state->sr[wm->level]; + +		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2; +		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2; +		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2; +		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2; +	} +} + +static void vlv_update_wm(struct drm_crtc *crtc)  {  	struct drm_device *dev = crtc->dev;  	struct drm_i915_private *dev_priv = dev->dev_private;  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);  	enum pipe pipe = intel_crtc->pipe; -	int sprite = to_intel_plane(plane)->plane; -	bool cxsr_enabled; -	struct vlv_wm_values wm = dev_priv->wm.vlv; +	struct vlv_wm_values wm = {}; -	if (enabled) { -		wm.ddl[pipe].sprite[sprite] = -			vlv_compute_drain_latency(crtc, plane); +	vlv_compute_wm(intel_crtc); +	
vlv_merge_wm(dev, &wm); -		wm.pipe[pipe].sprite[sprite] = -			vlv_compute_wm(intel_crtc, -				       to_intel_plane(plane), -				       vlv_get_fifo_size(dev, pipe, sprite+1)); -	} else { -		wm.ddl[pipe].sprite[sprite] = 0; -		wm.pipe[pipe].sprite[sprite] = 0; +	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) { +		/* FIXME should be part of crtc atomic commit */ +		vlv_pipe_set_fifo_size(intel_crtc); +		return;  	} -	cxsr_enabled = vlv_compute_sr_wm(dev, &wm); - -	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) -		return; +	if (wm.level < VLV_WM_LEVEL_DDR_DVFS && +	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS) +		chv_set_memory_dvfs(dev_priv, false); -	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, " -		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe), -		      sprite_name(pipe, sprite), -		      wm.pipe[pipe].sprite[sprite], -		      wm.sr.plane, wm.sr.cursor); +	if (wm.level < VLV_WM_LEVEL_PM5 && +	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5) +		chv_set_memory_pm5(dev_priv, false); -	if (!cxsr_enabled) +	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)  		intel_set_memory_cxsr(dev_priv, false); +	/* FIXME should be part of crtc atomic commit */ +	vlv_pipe_set_fifo_size(intel_crtc); +  	vlv_write_wm_values(intel_crtc, &wm); -	if (cxsr_enabled) +	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " +		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n", +		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor, +		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1], +		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr); + +	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)  		intel_set_memory_cxsr(dev_priv, true); + +	if (wm.level >= VLV_WM_LEVEL_PM5 && +	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5) +		chv_set_memory_pm5(dev_priv, true); + +	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS && +	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS) +		chv_set_memory_dvfs(dev_priv, true); + +	dev_priv->wm.vlv = wm;  }  #define single_plane_enabled(mask) is_power_of_2(mask) @@ -1434,23 +1708,22 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)  	I915_WRITE(FW_BLC, fwater_lo);  } -static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, -				    struct drm_crtc *crtc) +uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)  { -	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);  	uint32_t pixel_rate; -	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock; +	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;  	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to  	 * adjust the pixel_rate here. 
*/ -	if (intel_crtc->config->pch_pfit.enabled) { +	if (pipe_config->pch_pfit.enabled) {  		uint64_t pipe_w, pipe_h, pfit_w, pfit_h; -		uint32_t pfit_size = intel_crtc->config->pch_pfit.size; +		uint32_t pfit_size = pipe_config->pch_pfit.size; + +		pipe_w = pipe_config->pipe_src_w; +		pipe_h = pipe_config->pipe_src_h; -		pipe_w = intel_crtc->config->pipe_src_w; -		pipe_h = intel_crtc->config->pipe_src_h;  		pfit_w = (pfit_size >> 16) & 0xFFFF;  		pfit_h = pfit_size & 0xFFFF;  		if (pipe_w < pfit_w) @@ -1815,7 +2088,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)  	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,  				     mode->crtc_clock);  	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, -					 dev_priv->display.get_display_clock_speed(dev_priv->dev)); +					 dev_priv->cdclk_freq);  	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |  	       PIPE_WM_LINETIME_TIME(linetime); @@ -2066,7 +2339,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,  	p->active = true;  	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; -	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); +	p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);  	if (crtc->primary->state->fb)  		p->pri.bytes_per_pixel = @@ -2085,7 +2358,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,  	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;  	p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; -	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { +	drm_for_each_legacy_plane(plane, dev) {  		struct intel_plane *intel_plane = to_intel_plane(plane);  		if (intel_plane->pipe == pipe) { @@ -2215,6 +2488,7 @@ static void ilk_wm_merge(struct drm_device *dev,  			 const struct ilk_wm_maximums *max,  			 struct intel_pipe_wm *merged)  { +	struct drm_i915_private *dev_priv = dev->dev_private;  	int level, max_level = ilk_wm_max_level(dev);  	int last_enabled_level = max_level; @@ -2255,7 +2529,8 @@ static void ilk_wm_merge(struct drm_device *dev,  	 * What we should check here is whether FBC can be  	 * enabled sometime later.  	 
*/ -	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) { +	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && +	    intel_fbc_enabled(dev_priv)) {  		for (level = 2; level <= max_level; level++) {  			struct intel_wm_level *wm = &merged->wm[level]; @@ -3043,8 +3318,10 @@ skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)  	if (!to_intel_crtc(crtc)->active)  		return 0; -	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); +	if (WARN_ON(p->pixel_rate == 0)) +		return 0; +	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);  }  static void skl_compute_transition_wm(struct drm_crtc *crtc, @@ -3685,6 +3962,159 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)  	}  } +#define _FW_WM(value, plane) \ +	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) +#define _FW_WM_VLV(value, plane) \ +	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) + +static void vlv_read_wm_values(struct drm_i915_private *dev_priv, +			       struct vlv_wm_values *wm) +{ +	enum pipe pipe; +	uint32_t tmp; + +	for_each_pipe(dev_priv, pipe) { +		tmp = I915_READ(VLV_DDL(pipe)); + +		wm->ddl[pipe].primary = +			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); +		wm->ddl[pipe].cursor = +			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); +		wm->ddl[pipe].sprite[0] = +			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); +		wm->ddl[pipe].sprite[1] = +			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); +	} + +	tmp = I915_READ(DSPFW1); +	wm->sr.plane = _FW_WM(tmp, SR); +	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB); +	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB); +	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA); + +	tmp = I915_READ(DSPFW2); +	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB); +	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA); +	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA); + +	tmp = I915_READ(DSPFW3); +	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); + +	if (IS_CHERRYVIEW(dev_priv)) { +		tmp = I915_READ(DSPFW7_CHV); +		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); +		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); + +		tmp = I915_READ(DSPFW8_CHV); +		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF); +		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE); + +		tmp = I915_READ(DSPFW9_CHV); +		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC); +		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC); + +		tmp = I915_READ(DSPHOWM); +		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; +		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8; +		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8; +		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8; +		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; +		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; +		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; +		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; +		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; +		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; +	} else { +		tmp = I915_READ(DSPFW7); +		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED); +		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC); + +		tmp = I915_READ(DSPHOWM); +		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; +		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8; +		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8; +		
wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8; +		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8; +		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8; +		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8; +	} +} + +#undef _FW_WM +#undef _FW_WM_VLV + +void vlv_wm_get_hw_state(struct drm_device *dev) +{ +	struct drm_i915_private *dev_priv = to_i915(dev); +	struct vlv_wm_values *wm = &dev_priv->wm.vlv; +	struct intel_plane *plane; +	enum pipe pipe; +	u32 val; + +	vlv_read_wm_values(dev_priv, wm); + +	for_each_intel_plane(dev, plane) { +		switch (plane->base.type) { +			int sprite; +		case DRM_PLANE_TYPE_CURSOR: +			plane->wm.fifo_size = 63; +			break; +		case DRM_PLANE_TYPE_PRIMARY: +			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0); +			break; +		case DRM_PLANE_TYPE_OVERLAY: +			sprite = plane->plane; +			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1); +			break; +		} +	} + +	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; +	wm->level = VLV_WM_LEVEL_PM2; + +	if (IS_CHERRYVIEW(dev_priv)) { +		mutex_lock(&dev_priv->rps.hw_lock); + +		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); +		if (val & DSP_MAXFIFO_PM5_ENABLE) +			wm->level = VLV_WM_LEVEL_PM5; + +		/* +		 * If DDR DVFS is disabled in the BIOS, Punit +		 * will never ack the request. So if that happens +		 * assume we don't have to enable/disable DDR DVFS +		 * dynamically. To test that just set the REQ_ACK +		 * bit to poke the Punit, but don't change the +		 * HIGH/LOW bits so that we don't actually change +		 * the current state. +		 */ +		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); +		val |= FORCE_DDR_FREQ_REQ_ACK; +		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); + +		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & +			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { +			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " +				      "assuming DDR DVFS is disabled\n"); +			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; +		} else { +			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); +			if ((val & FORCE_DDR_HIGH_FREQ) == 0) +				wm->level = VLV_WM_LEVEL_DDR_DVFS; +		} + +		mutex_unlock(&dev_priv->rps.hw_lock); +	} + +	for_each_pipe(dev_priv, pipe) +		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", +			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor, +			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]); + +	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", +		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); +} +  void ilk_wm_get_hw_state(struct drm_device *dev)  {  	struct drm_i915_private *dev_priv = dev->dev_private; @@ -4083,14 +4513,14 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)  		      "Odd GPU freq value\n"))  		val &= ~1; +	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); +  	if (val != dev_priv->rps.cur_freq) {  		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);  		if (!IS_CHERRYVIEW(dev_priv))  			gen6_set_rps_thresholds(dev_priv, val);  	} -	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); -  	dev_priv->rps.cur_freq = val;  	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));  } @@ -4250,12 +4680,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)  static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)  { -	/* No RC6 before Ironlake */ -	if (INTEL_INFO(dev)->gen < 5) -		return 0; - -	/* RC6 is only on 
Ironlake mobile not on desktop */ -	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev)) +	/* No RC6 before Ironlake and code is gone for ilk. */ +	if (INTEL_INFO(dev)->gen < 6)  		return 0;  	/* Respect the kernel parameter if it is set */ @@ -4275,10 +4701,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)  		return enable_rc6 & mask;  	} -	/* Disable RC6 on Ironlake */ -	if (INTEL_INFO(dev)->gen == 5) -		return 0; -  	if (IS_IVYBRIDGE(dev))  		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); @@ -4297,25 +4719,26 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)  	u32 ddcc_status = 0;  	int ret; -	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);  	/* All of these values are in units of 50MHz */  	dev_priv->rps.cur_freq		= 0;  	/* static values from HW: RP0 > RP1 > RPn (min_freq) */ -	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff; -	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff; -	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff; -	if (IS_SKYLAKE(dev)) { -		/* Store the frequency values in 16.66 MHZ units, which is -		   the natural hardware unit for SKL */ -		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; -		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; -		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; +	if (IS_BROXTON(dev)) { +		rp_state_cap = I915_READ(BXT_RP_STATE_CAP); +		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; +		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff; +		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff; +	} else { +		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); +		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff; +		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff; +		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;  	} +  	/* hw_max = RP0 until we check for overclocking */  	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;  	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; -	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { +	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {  		ret = sandybridge_pcode_read(dev_priv,  					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,  					&ddcc_status); @@ -4327,6 +4750,16 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)  					dev_priv->rps.max_freq);  	} +	if (IS_SKYLAKE(dev)) { +		/* Store the frequency values in 16.66 MHZ units, which is +		   the natural hardware unit for SKL */ +		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; +		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; +		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; +		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; +		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; +	} +  	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;  	/* Preserve min/max settings in case of re-init */ @@ -4619,6 +5052,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)  	int min_freq = 15;  	unsigned int gpu_freq;  	unsigned int max_ia_freq, min_ring_freq; +	unsigned int max_gpu_freq, min_gpu_freq;  	int scaling_factor = 180;  	struct cpufreq_policy *policy; @@ -4643,17 +5077,31 @@ static void __gen6_update_ring_freq(struct drm_device *dev)  	/* convert DDR frequency from units of 266.6MHz to bandwidth */  	min_ring_freq = mult_frac(min_ring_freq, 8, 3); +	if (IS_SKYLAKE(dev)) { +		/* Convert GT frequency to 50 HZ units */ +		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; +		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; +	} else { +		min_gpu_freq = dev_priv->rps.min_freq; +		max_gpu_freq = dev_priv->rps.max_freq; +	} +  	/*  	 * For each potential GPU frequency, 
load a ring frequency we'd like  	 * to use for memory access.  We do this by specifying the IA frequency  	 * the PCU should use as a reference to determine the ring frequency.  	 */ -	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq; -	     gpu_freq--) { -		int diff = dev_priv->rps.max_freq - gpu_freq; +	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { +		int diff = max_gpu_freq - gpu_freq;  		unsigned int ia_freq = 0, ring_freq = 0; -		if (INTEL_INFO(dev)->gen >= 8) { +		if (IS_SKYLAKE(dev)) { +			/* +			 * ring_freq = 2 * GT. ring_freq is in 100MHz units +			 * No floor required for ring frequency on SKL. +			 */ +			ring_freq = gpu_freq; +		} else if (INTEL_INFO(dev)->gen >= 8) {  			/* max(2 * GT, DDR). NB: GT is 50MHz units */  			ring_freq = max(min_ring_freq, gpu_freq);  		} else if (IS_HASWELL(dev)) { @@ -4687,7 +5135,7 @@ void gen6_update_ring_freq(struct drm_device *dev)  {  	struct drm_i915_private *dev_priv = dev->dev_private; -	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev)) +	if (!HAS_CORE_RING_FREQ(dev))  		return;  	mutex_lock(&dev_priv->rps.hw_lock); @@ -5802,7 +6250,8 @@ static void intel_gen6_powersave_work(struct work_struct *work)  	} else if (INTEL_INFO(dev)->gen >= 9) {  		gen9_enable_rc6(dev);  		gen9_enable_rps(dev); -		__gen6_update_ring_freq(dev); +		if (IS_SKYLAKE(dev)) +			__gen6_update_ring_freq(dev);  	} else if (IS_BROADWELL(dev)) {  		gen8_enable_rps(dev);  		__gen6_update_ring_freq(dev); @@ -6686,13 +7135,15 @@ void intel_init_pm(struct drm_device *dev)  		else if (INTEL_INFO(dev)->gen == 8)  			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;  	} else if (IS_CHERRYVIEW(dev)) { -		dev_priv->display.update_wm = valleyview_update_wm; -		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; +		vlv_setup_wm_latency(dev); + +		dev_priv->display.update_wm = vlv_update_wm;  		dev_priv->display.init_clock_gating =  			cherryview_init_clock_gating;  	} else if (IS_VALLEYVIEW(dev)) { -		dev_priv->display.update_wm = valleyview_update_wm; -		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; +		vlv_setup_wm_latency(dev); + +		dev_priv->display.update_wm = vlv_update_wm;  		dev_priv->display.init_clock_gating =  			valleyview_init_clock_gating;  	} else if (IS_PINEVIEW(dev)) {  |
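
The new vlv_compute_wm_level() above computes each plane's watermark with the classic "method 2" formula in vlv_wm_method2(): the latency (given in 0.1 us units) times the pixel rate yields the number of lines scanned out during the latency window, and the bytes fetched for those lines are rounded up to 64-byte FIFO cachelines. Below is a minimal standalone sketch that mirrors that arithmetic outside the driver; the pixel clock, htotal, width, cpp and latency values are invented purely for illustration.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirror of vlv_wm_method2() from the diff above; latency is in 0.1 us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,   /* kHz */
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int bytes_per_pixel,
				   unsigned int latency)       /* 0.1 us */
{
	unsigned int ret;

	/* lines scanned out while a fetch is outstanding */
	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	/* bytes needed for those lines, rounded up to 64-byte cachelines */
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	return DIV_ROUND_UP(ret, 64);
}

int main(void)
{
	/* Illustrative numbers only: ~1920x1080, 4 bpp, PM2 latency of 3 us */
	unsigned int wm = vlv_wm_method2(148500, 2200, 1920, 4, 3 * 10);

	printf("method2 watermark: %u cachelines\n", wm);
	return 0;
}

In the driver the result is compared against the plane's share of the 511-entry FIFO, and vlv_invert_wms() converts it by subtracting it from the plane's FIFO size before the values are merged and written out by vlv_write_wm_values().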
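vlv_compute_fifo() above splits the 511-entry per-pipe display FIFO between the visible non-cursor planes in proportion to their bytes per pixel, then spreads the integer-division remainder so the shares sum back to 511 (the cursor keeps a fixed 63 entries). Here is a small self-contained sketch of that split; the three-plane configuration and its cpp values are hypothetical.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define FIFO_SIZE (512 - 1)

int main(void)
{
	/* hypothetical primary plane + two sprites, bytes per pixel of each */
	unsigned int cpp[] = { 4, 4, 2 };
	unsigned int num_planes = 3, total_rate = 0, fifo_left = FIFO_SIZE;
	unsigned int fifo[3], extra, i;

	for (i = 0; i < num_planes; i++)
		total_rate += cpp[i];

	/* proportional split, as in vlv_compute_fifo() */
	for (i = 0; i < num_planes; i++) {
		fifo[i] = FIFO_SIZE * cpp[i] / total_rate;
		fifo_left -= fifo[i];
	}

	/* spread the remainder evenly over the planes */
	extra = DIV_ROUND_UP(fifo_left, num_planes);
	for (i = 0; i < num_planes && fifo_left; i++) {
		unsigned int add = extra < fifo_left ? extra : fifo_left;

		fifo[i] += add;
		fifo_left -= add;
	}

	for (i = 0; i < num_planes; i++)
		printf("plane %u: %u FIFO entries\n", i, fifo[i]);
	return 0;
}

vlv_pipe_set_fifo_size() then programs the resulting sprite0/sprite1 start offsets into DSPARB and DSPARB2 (DSPARB3 for pipe C) through the VLV_FIFO() shift-and-mask helper.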
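The RPS initialization changes store Skylake frequencies in 16.66 MHz units ("the natural hardware unit for SKL", per the moved comment) by scaling the 50 MHz-unit RP0/RP1/RPn capabilities with GEN9_FREQ_SCALER, and __gen6_update_ring_freq() divides by the same scaler so the ring-frequency table is still walked in 50 MHz GT units. A tiny sketch of that round trip; the scaler value of 3 (50 MHz / 16.66 MHz) and the RP0 capability value are assumptions for illustration.

#include <stdio.h>

/* Assumptions for illustration: capabilities come in 50 MHz units and
 * GEN9_FREQ_SCALER is 3 (50 MHz / 16.66 MHz). */
#define GT_FREQUENCY_MULTIPLIER	50
#define GEN9_FREQ_SCALER	3

int main(void)
{
	unsigned int rp0_cap = 23;	/* hypothetical RP0 in 50 MHz units (1150 MHz) */

	/* gen6_init_rps_frequencies() on SKL: store in 16.66 MHz units */
	unsigned int rp0_freq = rp0_cap * GEN9_FREQ_SCALER;

	/* __gen6_update_ring_freq() on SKL: convert back for the table walk */
	unsigned int max_gpu_freq = rp0_freq / GEN9_FREQ_SCALER;

	printf("RP0 = %u MHz, stored as %u (16.66 MHz units), table max = %u (50 MHz units)\n",
	       rp0_cap * GT_FREQUENCY_MULTIPLIER, rp0_freq, max_gpu_freq);
	return 0;
}

Inside the table-walk loop, SKL then uses ring_freq = gpu_freq directly (no DDR floor), unlike gen8 which takes max(2 * GT, DDR), as the hunk above shows.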