Diffstat (limited to 'drivers/gpu/drm/i915/intel_psr.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_psr.c | 411
1 file changed, 93 insertions, 318 deletions
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index db27f2faa1de..d4cd19fea148 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -97,10 +97,6 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
 {
 	u32 debug_mask, mask;
 
-	/* No PSR interrupts on VLV/CHV */
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return;
-
 	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
 	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
 		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +197,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	}
 }
 
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
-	uint8_t psr_caps = 0;
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
-		return false;
-	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 {
 	uint8_t dprx = 0;
@@ -232,13 +219,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 
 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 {
-	u8 val = 0;
+	u8 val = 8; /* assume the worst if we can't read the value */
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SYNCHRONIZATION_LATENCY_IN_SINK,
 			      &val) == 1)
 		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
 	else
-		DRM_ERROR("Unable to get sink synchronization latency\n");
+		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
 
 	return val;
 }
@@ -250,13 +237,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 			 sizeof(intel_dp->psr_dpcd));
 
-	if (intel_dp->psr_dpcd[0]) {
-		dev_priv->psr.sink_support = true;
-		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+	if (!intel_dp->psr_dpcd[0])
+		return;
+	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+		      intel_dp->psr_dpcd[0]);
+
+	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+		return;
 	}
+	dev_priv->psr.sink_support = true;
+	dev_priv->psr.sink_sync_latency =
+		intel_dp_get_sink_sync_latency(intel_dp);
 
 	if (INTEL_GEN(dev_priv) >= 9 &&
 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+		bool y_req = intel_dp->psr_dpcd[1] &
+			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+		bool alpm = intel_dp_get_alpm_status(intel_dp);
+
 		/*
 		 * All panels that supports PSR version 03h (PSR2 +
 		 * Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,47 +267,17 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		 * Y-coordinate requirement panels we would need to enable
 		 * GTC first.
 		 */
-		dev_priv->psr.sink_psr2_support =
-			intel_dp_get_y_coord_required(intel_dp);
-		DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
-			      ? "supported" : "not supported");
+		dev_priv->psr.sink_psr2_support = y_req && alpm;
+		DRM_DEBUG_KMS("PSR2 %ssupported\n",
+			      dev_priv->psr.sink_psr2_support ? "" : "not ");
 
 		if (dev_priv->psr.sink_psr2_support) {
 			dev_priv->psr.colorimetry_support =
 				intel_dp_get_colorimetry_status(intel_dp);
-			dev_priv->psr.alpm =
-				intel_dp_get_alpm_status(intel_dp);
-			dev_priv->psr.sink_sync_latency =
-				intel_dp_get_sink_sync_latency(intel_dp);
 		}
 	}
 }
 
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	uint32_t val;
-
-	val = I915_READ(VLV_PSRSTAT(pipe)) &
-	      VLV_EDP_PSR_CURR_STATE_MASK;
-	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
-			      const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	uint32_t val;
-
-	/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
-	val = I915_READ(VLV_VSCSDP(crtc->pipe));
-	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
-	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
-	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
 static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
 			      const struct intel_crtc_state *crtc_state)
 {
@@ -341,12 +310,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
 					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -389,13 +352,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	u8 dpcd_val = DP_PSR_ENABLE;
 
 	/* Enable ALPM at sink for psr2 */
-	if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				DP_RECEIVER_ALPM_CONFIG,
-				DP_ALPM_ENABLE);
-
-	if (dev_priv->psr.psr2_enabled)
+	if (dev_priv->psr.psr2_enabled) {
+		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+				   DP_ALPM_ENABLE);
 		dpcd_val |= DP_PSR_ENABLE_PSR2;
+	}
+
 	if (dev_priv->psr.link_standby)
 		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
@@ -403,81 +365,49 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
-				  const struct intel_crtc_state *crtc_state)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
-	I915_WRITE(VLV_PSRCTL(crtc->pipe),
-		   VLV_EDP_PSR_MODE_SW_TIMER |
-		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
-		   VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc = dig_port->base.base.crtc;
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-	/*
-	 * Let's do the transition from PSR_state 1 (inactive) to
-	 * PSR_state 2 (transition to active - static frame transmission).
-	 * Then Hardware is responsible for the transition to
-	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
-	 */
-	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
-		   VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
 static void hsw_activate_psr1(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	u32 max_sleep_time = 0x1f;
+	u32 val = EDP_PSR_ENABLE;
 
-	uint32_t max_sleep_time = 0x1f;
-	/*
-	 * Let's respect VBT in case VBT asks a higher idle_frame value.
-	 * Let's use 6 as the minimum to cover all known cases including
-	 * the off-by-one issue that HW has in some cases. Also there are
-	 * cases where sink should be able to train
-	 * with the 5 or 6 idle patterns.
+	/* Let's use 6 as the minimum to cover all known cases including the
+	 * off-by-one issue that HW has in some cases.
 	 */
-	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-	uint32_t val = EDP_PSR_ENABLE;
+	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 
-	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+	/* sink_sync_latency of 8 means source has to wait for more than 8
+	 * frames, we'll go with 9 frames for now
	 */
+	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
 	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 
+	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
 	if (IS_HASWELL(dev_priv))
 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
 	if (dev_priv->psr.link_standby)
 		val |= EDP_PSR_LINK_STANDBY;
 
-	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
-		val |= EDP_PSR_TP1_TIME_2500us;
-	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
-		val |= EDP_PSR_TP1_TIME_500us;
-	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+		val |= EDP_PSR_TP1_TIME_0us;
+	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
 		val |= EDP_PSR_TP1_TIME_100us;
+	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+		val |= EDP_PSR_TP1_TIME_500us;
 	else
-		val |= EDP_PSR_TP1_TIME_0us;
+		val |= EDP_PSR_TP1_TIME_2500us;
 
-	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
-		val |= EDP_PSR_TP2_TP3_TIME_2500us;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
-		val |= EDP_PSR_TP2_TP3_TIME_500us;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+		val |= EDP_PSR_TP2_TP3_TIME_0us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
 		val |= EDP_PSR_TP2_TP3_TIME_100us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+		val |= EDP_PSR_TP2_TP3_TIME_500us;
 	else
-		val |= EDP_PSR_TP2_TP3_TIME_0us;
+		val |= EDP_PSR_TP2_TP3_TIME_2500us;
 
 	if (intel_dp_source_supports_hbr2(intel_dp) &&
 	    drm_dp_tps3_supported(intel_dp->dpcd))
@@ -494,15 +424,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	/*
-	 * Let's respect VBT in case VBT asks a higher idle_frame value.
-	 * Let's use 6 as the minimum to cover all known cases including
-	 * the off-by-one issue that HW has in some cases. Also there are
-	 * cases where sink should be able to train
-	 * with the 5 or 6 idle patterns.
+	u32 val;
+
+	/* Let's use 6 as the minimum to cover all known cases including the
+	 * off-by-one issue that HW has in some cases.
 	 */
-	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-	u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
 
 	/* FIXME: selective update is probably totally broken because it doesn't
 	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,14 +443,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 
 	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
 
-	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
-		val |= EDP_PSR2_TP2_TIME_2500;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
-		val |= EDP_PSR2_TP2_TIME_500;
-	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
-		val |= EDP_PSR2_TP2_TIME_100;
+	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+	    dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+		val |= EDP_PSR2_TP2_TIME_50us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+		val |= EDP_PSR2_TP2_TIME_100us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+		val |= EDP_PSR2_TP2_TIME_500us;
 	else
-		val |= EDP_PSR2_TP2_TIME_50;
+		val |= EDP_PSR2_TP2_TIME_2500us;
 
 	I915_WRITE(EDP_PSR2_CTL, val);
 }
@@ -602,17 +533,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 	 * ones. Since by Display design transcoder EDP is tied to port A
 	 * we can safely escape based on the port A.
 	 */
-	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+	if (dig_port->base.port != PORT_A) {
 		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
 		return;
 	}
 
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    !dev_priv->psr.link_standby) {
-		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
-		return;
-	}
-
 	if (IS_HASWELL(dev_priv) &&
 	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
 	    S3D_ENABLE) {
@@ -640,11 +565,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 		return;
 	}
 
-	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
-		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
-		return;
-	}
-
 	crtc_state->has_psr = true;
 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
 	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -751,57 +671,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	dev_priv->psr.enable_source(intel_dp, crtc_state);
 	dev_priv->psr.enabled = intel_dp;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_psr_activate(intel_dp);
-	} else {
-		/*
-		 * FIXME: Activation should happen immediately since this
-		 * function is just called after pipe is fully trained and
-		 * enabled.
-		 * However on some platforms we face issues when first
-		 * activation follows a modeset so quickly.
-		 * - On VLV/CHV we get bank screen on first activation
-		 * - On HSW/BDW we get a recoverable frozen screen until
-		 *   next exit-activate sequence.
-		 */
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-	}
+	intel_psr_activate(intel_dp);
 
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void vlv_psr_disable(struct intel_dp *intel_dp,
-			    const struct intel_crtc_state *old_crtc_state)
-{
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-	uint32_t val;
-
-	if (dev_priv->psr.active) {
-		/* Put VLV PSR back to PSR_state 0 (disabled). */
-		if (intel_wait_for_register(dev_priv,
-					    VLV_PSRSTAT(crtc->pipe),
-					    VLV_EDP_PSR_IN_TRANS,
-					    0,
-					    1))
-			WARN(1, "PSR transition took longer than expected\n");
-
-		val = I915_READ(VLV_PSRCTL(crtc->pipe));
-		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
-		val &= ~VLV_EDP_PSR_ENABLE;
-		val &= ~VLV_EDP_PSR_MODE_MASK;
-		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
-		dev_priv->psr.active = false;
-	} else {
-		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
-	}
-}
-
 static void hsw_psr_disable(struct intel_dp *intel_dp,
 			    const struct intel_crtc_state *old_crtc_state)
 {
@@ -879,8 +754,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
-
-	cancel_delayed_work_sync(&dev_priv->psr.work);
+	cancel_work_sync(&dev_priv->psr.work);
 }
 
 static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
@@ -894,21 +768,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
 	if (!intel_dp)
 		return false;
 
-	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_enabled) {
-			reg = EDP_PSR2_STATUS;
-			mask = EDP_PSR2_STATUS_STATE_MASK;
-		} else {
-			reg = EDP_PSR_STATUS;
-			mask = EDP_PSR_STATUS_STATE_MASK;
-		}
+	if (dev_priv->psr.psr2_enabled) {
+		reg = EDP_PSR2_STATUS;
+		mask = EDP_PSR2_STATUS_STATE_MASK;
 	} else {
-		struct drm_crtc *crtc =
-			dp_to_dig_port(intel_dp)->base.base.crtc;
-		enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-		reg = VLV_PSRSTAT(pipe);
-		mask = VLV_EDP_PSR_IN_TRANS;
+		reg = EDP_PSR_STATUS;
+		mask = EDP_PSR_STATUS_STATE_MASK;
 	}
 
 	mutex_unlock(&dev_priv->psr.lock);
@@ -925,10 +790,13 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.work.work);
+		container_of(work, typeof(*dev_priv), psr.work);
 
 	mutex_lock(&dev_priv->psr.lock);
 
+	if (!dev_priv->psr.enabled)
+		goto unlock;
+
 	/*
 	 * We have to make sure PSR is ready for re-enable
 	 * otherwise it keeps disabled until next full enable/disable cycle.
@@ -953,103 +821,24 @@ unlock:
 
 static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct intel_dp *intel_dp = dev_priv->psr.enabled;
-	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	u32 val;
 
 	if (!dev_priv->psr.active)
 		return;
 
-	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_enabled) {
-			val = I915_READ(EDP_PSR2_CTL);
-			WARN_ON(!(val & EDP_PSR2_ENABLE));
-			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-		} else {
-			val = I915_READ(EDP_PSR_CTL);
-			WARN_ON(!(val & EDP_PSR_ENABLE));
-			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-		}
+	if (dev_priv->psr.psr2_enabled) {
+		val = I915_READ(EDP_PSR2_CTL);
+		WARN_ON(!(val & EDP_PSR2_ENABLE));
+		I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
 	} else {
-		val = I915_READ(VLV_PSRCTL(pipe));
-
-		/*
-		 * Here we do the transition drirectly from
-		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
-		 * PSR_state 5 (exit).
-		 * PSR State 4 (active with single frame update) can be skipped.
-		 * On PSR_state 5 (exit) Hardware is responsible to transition
-		 * back to PSR_state 1 (inactive).
-		 * Now we are at Same state after vlv_psr_enable_source.
-		 */
-		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
-		I915_WRITE(VLV_PSRCTL(pipe), val);
-
-		/*
-		 * Send AUX wake up - Spec says after transitioning to PSR
-		 * active we have to send AUX wake up by writing 01h in DPCD
-		 * 600h of sink device.
-		 * XXX: This might slow down the transition, but without this
-		 * HW doesn't complete the transition to PSR_state 1 and we
-		 * never get the screen updated.
-		 */
-		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
-				   DP_SET_POWER_D0);
+		val = I915_READ(EDP_PSR_CTL);
+		WARN_ON(!(val & EDP_PSR_ENABLE));
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
 	}
-
 	dev_priv->psr.active = false;
 }
 
 /**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
-				   unsigned frontbuffer_bits)
-{
-	struct drm_crtc *crtc;
-	enum pipe pipe;
-	u32 val;
-
-	if (!CAN_PSR(dev_priv))
-		return;
-
-	/*
-	 * Single frame update is already supported on BDW+ but it requires
-	 * many W/A and it isn't really needed.
-	 */
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
-
-	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-	pipe = to_intel_crtc(crtc)->pipe;
-
-	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
-		val = I915_READ(VLV_PSRCTL(pipe));
-
-		/*
-		 * We need to set this bit before writing registers for a flip.
-		 * This bit will be self-clear when it gets to the PSR active state.
-		 */
-		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
-	}
-	mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
  * intel_psr_invalidate - Invalidade PSR
  * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
@@ -1071,7 +860,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 	if (!CAN_PSR(dev_priv))
 		return;
 
-	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+	if (origin == ORIGIN_FLIP)
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +903,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	if (!CAN_PSR(dev_priv))
 		return;
 
-	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+	if (origin == ORIGIN_FLIP)
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +920,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits) {
-		if (dev_priv->psr.psr2_enabled ||
-		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		if (dev_priv->psr.psr2_enabled) {
			intel_psr_exit(dev_priv);
 		} else {
			/*
@@ -1149,9 +937,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		if (!work_busy(&dev_priv->psr.work.work))
-			schedule_delayed_work(&dev_priv->psr.work,
-					      msecs_to_jiffies(100));
+		schedule_work(&dev_priv->psr.work);
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -1184,9 +970,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		/* HSW and BDW require workarounds that we don't implement. */
 		dev_priv->psr.link_standby = false;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		/* On VLV and CHV only standby mode is supported. */
-		dev_priv->psr.link_standby = true;
 	else
 		/* For new platforms let's respect VBT back again */
 		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
@@ -1201,21 +984,13 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 		dev_priv->psr.link_standby = false;
 	}
 
-	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
+	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->psr.enable_source = vlv_psr_enable_source;
-		dev_priv->psr.disable_source = vlv_psr_disable;
-		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
-		dev_priv->psr.activate = vlv_psr_activate;
-		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
-	} else {
-		dev_priv->psr.has_hw_tracking = true;
-		dev_priv->psr.enable_source = hsw_psr_enable_source;
-		dev_priv->psr.disable_source = hsw_psr_disable;
-		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
-		dev_priv->psr.activate = hsw_psr_activate;
-		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
-	}
+	dev_priv->psr.enable_source = hsw_psr_enable_source;
+	dev_priv->psr.disable_source = hsw_psr_disable;
+	dev_priv->psr.enable_sink = hsw_psr_enable_sink;
+	dev_priv->psr.activate = hsw_psr_activate;
+	dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
 }