Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_psr.c')
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 969
 1 file changed, 448 insertions(+), 521 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index f5b33335a9ae..a9d9383e4ee5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -27,8 +27,10 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_cursor_regs.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -233,6 +235,26 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
}
}
+static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (i915->display.params.enable_psr != -1)
+ return false;
+
+ return true;
+}
+
+static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if ((i915->display.params.enable_psr != -1) ||
+ (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
+ return false;
+ return true;
+}
+
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
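Both helpers above key off the enable_psr module parameter: any explicit value, even an explicit enable, currently takes Panel Replay and SU-region early transport out of consideration; only the default -1 leaves them eligible, and Panel Replay additionally honours the new I915_PSR_DEBUG_PANEL_REPLAY_DISABLE debug bit. A minimal caller-side sketch (the helper name below is hypothetical; the real caller is _panel_replay_compute_config() further down):

/*
 * Illustrative only: Panel Replay is considered when the sink
 * advertises it and nothing overrides the driver default.
 */
static bool example_panel_replay_eligible(struct intel_dp *intel_dp)
{
	return intel_dp->psr.sink_panel_replay_support &&
	       panel_replay_global_enabled(intel_dp);
}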
@@ -269,7 +291,7 @@ static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_CTL(cpu_transcoder);
+ return EDP_PSR_CTL(dev_priv, cpu_transcoder);
else
return HSW_SRD_CTL;
}
@@ -278,7 +300,7 @@ static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_DEBUG(cpu_transcoder);
+ return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
else
return HSW_SRD_DEBUG;
}
@@ -287,7 +309,7 @@ static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_PERF_CNT(cpu_transcoder);
+ return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
else
return HSW_SRD_PERF_CNT;
}
@@ -296,7 +318,7 @@ static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_STATUS(cpu_transcoder);
+ return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
else
return HSW_SRD_STATUS;
}
@@ -305,7 +327,7 @@ static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IMR(cpu_transcoder);
+ return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
else
return EDP_PSR_IMR;
}
@@ -314,7 +336,7 @@ static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IIR(cpu_transcoder);
+ return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
else
return EDP_PSR_IIR;
}
@@ -323,7 +345,7 @@ static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_CTL(cpu_transcoder);
+ return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
else
return HSW_SRD_AUX_CTL;
}
@@ -332,7 +354,7 @@ static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, int i)
{
if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_DATA(cpu_transcoder, i);
+ return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
else
return HSW_SRD_AUX_DATA(i);
}
@@ -356,12 +378,12 @@ static void psr_irq_control(struct intel_dp *intel_dp)
}
static void psr_event_print(struct drm_i915_private *i915,
- u32 val, bool psr2_enabled)
+ u32 val, bool sel_update_enabled)
{
drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
- if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
+ if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
@@ -389,7 +411,7 @@ static void psr_event_print(struct drm_i915_private *i915,
drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
- if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
+ if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}
@@ -415,9 +437,11 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
if (DISPLAY_VER(dev_priv) >= 9) {
u32 val;
- val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
+ val = intel_de_rmw(dev_priv,
+ PSR_EVENT(dev_priv, cpu_transcoder),
+ 0, 0);
- psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
+ psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
}
}
@@ -442,16 +466,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
}
-static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
-{
- u8 alpm_caps = 0;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
- &alpm_caps) != 1)
- return false;
- return alpm_caps & DP_ALPM_CAP;
-}
-
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -466,6 +480,40 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}
+static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
+{
+ u8 su_capability = 0;
+
+ if (intel_dp->psr.sink_panel_replay_su_support)
+ drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_PANEL_PANEL_REPLAY_CAPABILITY,
+ &su_capability);
+ else
+ su_capability = intel_dp->psr_dpcd[1];
+
+ return su_capability;
+}
+
+static unsigned int
+intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr.sink_panel_replay_su_support ?
+ DP_PANEL_PANEL_REPLAY_X_GRANULARITY :
+ DP_PSR2_SU_X_GRANULARITY;
+}
+
+static unsigned int
+intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr.sink_panel_replay_su_support ?
+ DP_PANEL_PANEL_REPLAY_Y_GRANULARITY :
+ DP_PSR2_SU_Y_GRANULARITY;
+}
+
+/*
+ * Note: Bits related to granularity are same in panel replay and psr
+ * registers. Rely on PSR definitions on these "common" bits.
+ */
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -473,18 +521,29 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
u16 w;
u8 y;
- /* If sink don't have specific granularity requirements set legacy ones */
- if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
+ /*
+ * TODO: Do we need to take into account panel supporting both PSR and
+ * Panel replay?
+ */
+
+ /*
+ * If sink don't have specific granularity requirements set legacy
+ * ones.
+ */
+ if (!(intel_dp_get_su_capability(intel_dp) &
+ DP_PSR2_SU_GRANULARITY_REQUIRED)) {
/* As PSR2 HW sends full lines, we do not care about x granularity */
w = 4;
y = 4;
goto exit;
}
- r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
+ r = drm_dp_dpcd_read(&intel_dp->aux,
+ intel_dp_get_su_x_granularity_offset(intel_dp),
+ &w, 2);
if (r != 2)
drm_dbg_kms(&i915->drm,
- "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+ "Unable to read selective update x granularity\n");
/*
* Spec says that if the value read is 0 the default granularity should
* be used instead.
@@ -492,10 +551,12 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
if (r != 2 || w == 0)
w = 4;
- r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
+ r = drm_dp_dpcd_read(&intel_dp->aux,
+ intel_dp_get_su_y_granularity_offset(intel_dp),
+ &y, 1);
if (r != 1) {
drm_dbg_kms(&i915->drm,
- "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
+ "Unable to read selective update y granularity\n");
y = 4;
}
if (y == 0)
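With the helpers above, the same granularity parsing serves both PSR2 and Panel Replay sinks; only the DPCD offsets differ, and a failed read still falls back to the same defaults (4) as before. A small worked sketch with made-up read results, not taken from real hardware:

/*
 * Illustrative only: a sink that sets DP_PSR2_SU_GRANULARITY_REQUIRED
 * but whose X-granularity read fails ends up with the same default as
 * a sink that states no requirement at all.
 */
static void example_su_granularity_fallback(void)
{
	u16 w;
	u8 y;

	/* hypothetical: X read failed, Y read returned 8 */
	w = 4;	/* default; PSR2 HW sends full lines anyway */
	y = 8;	/* taken from the sink as-is */
}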
@@ -509,20 +570,16 @@ exit:
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 pr_dpcd = 0;
- intel_dp->psr.sink_panel_replay_support = false;
- drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
+ intel_dp->psr.sink_panel_replay_support = true;
- if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
- drm_dbg_kms(&i915->drm,
- "Panel replay is not supported by panel\n");
- return;
- }
+ if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
+ intel_dp->psr.sink_panel_replay_su_support = true;
drm_dbg_kms(&i915->drm,
- "Panel replay is supported by panel\n");
- intel_dp->psr.sink_panel_replay_support = true;
+ "Panel replay %sis supported by panel\n",
+ intel_dp->psr.sink_panel_replay_su_support ?
+ "selective_update " : "");
}
static void _psr_init_dpcd(struct intel_dp *intel_dp)
@@ -553,7 +610,6 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
- bool alpm = intel_dp_get_alpm_status(intel_dp);
/*
* All panels that supports PSR version 03h (PSR2 +
@@ -566,7 +622,8 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- intel_dp->psr.sink_psr2_support = y_req && alpm;
+ intel_dp->psr.sink_psr2_support = y_req &&
+ intel_alpm_aux_wake_supported(intel_dp);
drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
}
@@ -574,15 +631,19 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
- _panel_replay_init_dpcd(intel_dp);
-
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP,
+ &intel_dp->pr_dpcd);
+
+ if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SUPPORT)
+ _panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr_dpcd[0])
_psr_init_dpcd(intel_dp);
- if (intel_dp->psr.sink_psr2_support)
+ if (intel_dp->psr.sink_psr2_support ||
+ intel_dp->psr.sink_panel_replay_su_support)
intel_dp_get_su_granularity(intel_dp);
}
@@ -623,68 +684,76 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_ctl);
}
-static bool psr2_su_region_et_valid(struct intel_dp *intel_dp)
+static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- if (DISPLAY_VER(i915) >= 20 &&
- intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
- !(intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE))
- return true;
+ if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
+ intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
+ return false;
- return false;
+ return panel_replay ?
+ intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
+ psr2_su_region_et_global_enabled(intel_dp);
}
-static unsigned int intel_psr_get_enable_sink_offset(struct intel_dp *intel_dp)
+static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- return intel_dp->psr.panel_replay_enabled ?
- PANEL_REPLAY_CONFIG : DP_PSR_EN_CFG;
-}
+ u8 val = DP_PANEL_REPLAY_ENABLE |
+ DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
+ DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
+ DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
+ DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
-/*
- * Note: Most of the bits are same in PANEL_REPLAY_CONFIG and DP_PSR_EN_CFG. We
- * are relying on PSR definitions on these "common" bits.
- */
-void intel_psr_enable_sink(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 dpcd_val = DP_PSR_ENABLE;
+ if (crtc_state->has_sel_update)
+ val |= DP_PANEL_REPLAY_SU_ENABLE;
- if (crtc_state->has_psr2) {
- /* Enable ALPM at sink for psr2 */
- if (!crtc_state->has_panel_replay) {
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE |
- DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+ if (crtc_state->enable_psr2_su_region_et)
+ val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
- if (psr2_su_region_et_valid(intel_dp))
- dpcd_val |= DP_PSR_ENABLE_SU_REGION_ET;
- }
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
+}
+
+static void _psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val = DP_PSR_ENABLE;
- dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ if (crtc_state->has_sel_update) {
+ val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
if (intel_dp->psr.link_standby)
- dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+ val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (!crtc_state->has_panel_replay && DISPLAY_VER(dev_priv) >= 8)
- dpcd_val |= DP_PSR_CRC_VERIFICATION;
+ if (DISPLAY_VER(i915) >= 8)
+ val |= DP_PSR_CRC_VERIFICATION;
}
- if (crtc_state->has_panel_replay)
- dpcd_val |= DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
- DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN;
-
- if (crtc_state->req_psr2_sdp_prior_scanline)
- dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
+ if (crtc_state->enable_psr2_su_region_et)
+ val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
if (intel_dp->psr.entry_setup_frames > 0)
- dpcd_val |= DP_PSR_FRAME_CAPTURE;
+ val |= DP_PSR_FRAME_CAPTURE;
- drm_dp_dpcd_writeb(&intel_dp->aux,
- intel_psr_get_enable_sink_offset(intel_dp),
- dpcd_val);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
+}
+
+void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ /* Enable ALPM at sink for psr2 */
+ if (!crtc_state->has_panel_replay && crtc_state->has_sel_update)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
+ crtc_state->has_panel_replay ?
+ _panel_replay_enable_sink(intel_dp, crtc_state) :
+ _psr_enable_sink(intel_dp, crtc_state);
if (intel_dp_is_edp(intel_dp))
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
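With the sink enable split into _panel_replay_enable_sink() and _psr_enable_sink(), the DPCD value is composed per mode instead of going through a shared offset helper. A worked composition for one hypothetical case, PSR2 with SU-region early transport, no link standby and entry_setup_frames == 0:

/* Illustrative only: the value _psr_enable_sink() would write. */
u8 val = DP_PSR_ENABLE |
	 DP_PSR_ENABLE_PSR2 |
	 DP_PSR_IRQ_HPD_WITH_CRC_ERRORS |
	 DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;	/* ET bit shared with PSR */

drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);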
@@ -814,8 +883,8 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
- return intel_dp->psr.alpm_parameters.io_wake_lines < 9 &&
- intel_dp->psr.alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
+ return intel_dp->alpm_parameters.io_wake_lines < 9 &&
+ intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
@@ -842,7 +911,8 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ intel_de_rmw(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
@@ -852,7 +922,6 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
@@ -869,7 +938,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= intel_psr2_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
if (psr2_block_count(intel_dp) > 2)
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
else
@@ -894,18 +963,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
- tmp = map[psr->alpm_parameters.io_wake_lines -
+ tmp = map[intel_dp->alpm_parameters.io_wake_lines -
TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
- tmp = map[psr->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
+ } else if (DISPLAY_VER(dev_priv) >= 20) {
+ val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 12) {
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
- val |= TGL_EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
} else if (DISPLAY_VER(dev_priv) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(psr->alpm_parameters.io_wake_lines);
- val |= EDP_PSR2_FAST_WAKE(psr->alpm_parameters.fast_wake_lines);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
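The wake-line fields now come from intel_dp->alpm_parameters, and the programming splits by display version: the existing map[] workaround path stays as-is, plain TGL fields cover display 12-19, and on display 20+ only the IO wake count goes into EDP_PSR2_CTL (fast wake is handled through ALPM_CTL's extended fast wake field, as the removed lnl_alpm_configure() further down shows). A compressed sketch of that branch with hypothetical wake-line values:

/* Illustrative only: io_wake_lines = 7, fast_wake_lines = 7. */
if (DISPLAY_VER(dev_priv) >= 20)
	val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(7);	/* no fast wake field here */
else if (DISPLAY_VER(dev_priv) >= 12)
	val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7) |
	       TGL_EDP_PSR2_FAST_WAKE(7);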
@@ -917,13 +988,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
}
- if (psr2_su_region_et_valid(intel_dp))
+ if (intel_dp->psr.su_region_et_enabled)
val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;
/*
@@ -932,7 +1005,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
- intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
}
static bool
@@ -963,7 +1036,7 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
+ intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
EDP_PSR2_IDLE_FRAMES_MASK,
EDP_PSR2_IDLE_FRAMES(idle_frames));
}
@@ -1088,9 +1161,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (psr2_su_region_et_valid(intel_dp))
- crtc_state->enable_psr2_su_region_et = true;
-
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -1161,237 +1231,6 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
}
-/*
- * See Bspec: 71632 for the table
- *
- * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
- *
- * Half cycle duration:
- *
- * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
- * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
- *
- * Link rates 5.4 - 8.1
- * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
- * LFPS Period chosen is the mid-point of the min:max values from the table
- * FLOOR( LFPS Period in Symbol clocks /
- * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
- */
-static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
- int *silence_period,
- int *lfps_half_cycle)
-{
- switch (link_rate) {
- case 162000:
- *silence_period = 20;
- *lfps_half_cycle = 5;
- break;
- case 216000:
- *silence_period = 27;
- *lfps_half_cycle = 7;
- break;
- case 243000:
- *silence_period = 31;
- *lfps_half_cycle = 8;
- break;
- case 270000:
- *silence_period = 34;
- *lfps_half_cycle = 9;
- break;
- case 324000:
- *silence_period = 41;
- *lfps_half_cycle = 11;
- break;
- case 432000:
- *silence_period = 56;
- *lfps_half_cycle = 15;
- break;
- case 540000:
- *silence_period = 69;
- *lfps_half_cycle = 12;
- break;
- case 648000:
- *silence_period = 84;
- *lfps_half_cycle = 15;
- break;
- case 675000:
- *silence_period = 87;
- *lfps_half_cycle = 15;
- break;
- case 810000:
- *silence_period = 104;
- *lfps_half_cycle = 19;
- break;
- default:
- *silence_period = *lfps_half_cycle = -1;
- return false;
- }
- return true;
-}
-
-/*
- * AUX-Less Wake Time = CEILING( ((PHY P2 to P0) + tLFPS_Period, Max+
- * tSilence, Max+ tPHY Establishment + tCDS) / tline)
- * For the "PHY P2 to P0" latency see the PHY Power Control page
- * (PHY P2 to P0) : https://gfxspecs.intel.com/Predator/Home/Index/68965
- * : 12 us
- * The tLFPS_Period, Max term is 800ns
- * The tSilence, Max term is 180ns
- * The tPHY Establishment (a.k.a. t1) term is 50us
- * The tCDS term is 1 or 2 times t2
- * t2 = Number ML_PHY_LOCK * tML_PHY_LOCK
- * Number ML_PHY_LOCK = ( 7 + CEILING( 6.5us / tML_PHY_LOCK ) + 1)
- * Rounding up the 6.5us padding to the next ML_PHY_LOCK boundary and
- * adding the "+ 1" term ensures all ML_PHY_LOCK sequences that start
- * within the CDS period complete within the CDS period regardless of
- * entry into the period
- * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
- * TPS4 Length = 252 Symbols
- */
-static int _lnl_compute_aux_less_wake_time(int port_clock)
-{
- int tphy2_p2_to_p0 = 12 * 1000;
- int tlfps_period_max = 800;
- int tsilence_max = 180;
- int t1 = 50 * 1000;
- int tps4 = 252;
- int tml_phy_lock = 1000 * 1000 * tps4 * 10 / port_clock;
- int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
- int t2 = num_ml_phy_lock * tml_phy_lock;
- int tcds = 1 * t2;
-
- return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
- t1 + tcds, 1000);
-}
-
-static int _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int aux_less_wake_time, aux_less_wake_lines, silence_period,
- lfps_half_cycle;
-
- aux_less_wake_time =
- _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
- aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
- aux_less_wake_time);
-
- if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
- &silence_period,
- &lfps_half_cycle))
- return false;
-
- if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
- silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
- lfps_half_cycle > PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK)
- return false;
-
- if (i915->display.params.psr_safest_params)
- aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
-
- intel_dp->psr.alpm_parameters.fast_wake_lines = aux_less_wake_lines;
- intel_dp->psr.alpm_parameters.silence_period_sym_clocks = silence_period;
- intel_dp->psr.alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
-
- return true;
-}
-
-static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int check_entry_lines;
-
- if (DISPLAY_VER(i915) < 20)
- return true;
-
- /* ALPM Entry Check = 2 + CEILING( 5us /tline ) */
- check_entry_lines = 2 +
- intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 5);
-
- if (check_entry_lines > 15)
- return false;
-
- if (!_lnl_compute_aux_less_alpm_params(intel_dp, crtc_state))
- return false;
-
- if (i915->display.params.psr_safest_params)
- check_entry_lines = 15;
-
- intel_dp->psr.alpm_parameters.check_entry_lines = check_entry_lines;
-
- return true;
-}
-
-/*
- * IO wake time for DISPLAY_VER < 12 is not directly mentioned in Bspec. There
- * are 50 us io wake time and 32 us fast wake time. Clearly preharge pulses are
- * not (improperly) included in 32 us fast wake time. 50 us - 32 us = 18 us.
- */
-static int skl_io_buffer_wake_time(void)
-{
- return 18;
-}
-
-static int tgl_io_buffer_wake_time(void)
-{
- return 10;
-}
-
-static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- if (DISPLAY_VER(i915) >= 12)
- return tgl_io_buffer_wake_time();
- else
- return skl_io_buffer_wake_time();
-}
-
-static bool _compute_alpm_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
- int tfw_exit_latency = 20; /* eDP spec */
- int phy_wake = 4; /* eDP spec */
- int preamble = 8; /* eDP spec */
- int precharge = intel_dp_aux_fw_sync_len() - preamble;
- u8 max_wake_lines;
-
- io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
- preamble + phy_wake + tfw_exit_latency;
- fast_wake_time = precharge + preamble + phy_wake +
- tfw_exit_latency;
-
- if (DISPLAY_VER(i915) >= 12)
- /* TODO: Check how we can use ALPM_CTL fast wake extended field */
- max_wake_lines = 12;
- else
- max_wake_lines = 8;
-
- io_wake_lines = intel_usecs_to_scanlines(
- &crtc_state->hw.adjusted_mode, io_wake_time);
- fast_wake_lines = intel_usecs_to_scanlines(
- &crtc_state->hw.adjusted_mode, fast_wake_time);
-
- if (io_wake_lines > max_wake_lines ||
- fast_wake_lines > max_wake_lines)
- return false;
-
- if (!_lnl_compute_alpm_params(intel_dp, crtc_state))
- return false;
-
- if (i915->display.params.psr_safest_params)
- io_wake_lines = fast_wake_lines = max_wake_lines;
-
- /* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->psr.alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->psr.alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
-
- return true;
-}
-
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode)
{
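All of the ALPM parameter math removed above (IO/fast wake times, AUX-less wake time, silence period and LFPS half cycle) now goes through the intel_alpm helpers pulled in via intel_alpm.h; the callers below switch to intel_alpm_compute_params()/intel_alpm_configure(). As a sanity check on the table that moved, the half-cycle formula from its comment can be evaluated for one entry, a hypothetical 2.70 Gbps link (port_clock = 270000):

/*
 * Illustrative only: FLOOR((Link Rate * tLFPS_Cycle) / (2 * 10)) with
 * a 2.7 Gbps link and tLFPS_Cycle = 70 ns gives FLOOR(189 / 20) = 9,
 * matching the "case 270000: *lfps_half_cycle = 9" entry above.
 */
static int example_lfps_half_cycle_2p7(void)
{
	return 189 / 20;	/* 2.7e9 bit/s * 70e-9 s = 189 */
}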
@@ -1425,6 +1264,31 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
return entry_setup_frames;
}
+static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
+ crtc_state->hw.adjusted_mode.crtc_vblank_start;
+ int wake_lines;
+
+ if (crtc_state->has_panel_replay)
+ wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ else
+ wake_lines = DISPLAY_VER(i915) < 20 ?
+ psr2_block_count_lines(intel_dp) :
+ intel_dp->alpm_parameters.io_wake_lines;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
+ vblank -= 1;
+
+ /* Vblank >= PSR2_CTL Block Count Number maximum line count */
+ if (vblank < wake_lines)
+ return false;
+
+ return true;
+}
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
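The new helper above folds the old "vblank must cover the PSR2 block count" rule together with the Panel Replay and display-20 variants: the threshold is aux_less_wake_lines for Panel Replay, the 8/12-line block count before display 20, and the raw io_wake_lines on display 20+, with one line given up when the SDP has to go out prior to the scanline. A worked sketch with hypothetical numbers:

/*
 * Illustrative only: vblank length 36 lines.
 *  - Panel Replay, aux_less_wake_lines = 40: 36 < 40  -> does not fit
 *  - display 20 PSR2, io_wake_lines = 7, SDP prior to scanline:
 *    (36 - 1) >= 7                                    -> fits
 */
static bool example_wake_lines_fit(int vblank, int wake_lines,
				   bool sdp_prior_scanline)
{
	if (sdp_prior_scanline)
		vblank -= 1;
	return vblank >= wake_lines;
}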
@@ -1461,11 +1325,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!psr2_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
- return false;
- }
-
/*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
@@ -1478,12 +1337,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (crtc_state->crc_enabled) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
- return false;
- }
-
if (DISPLAY_VER(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
@@ -1519,45 +1372,73 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!_compute_alpm_params(intel_dp, crtc_state)) {
+ if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, Unable to use long enough wake times\n");
return false;
}
/* Vblank >= PSR2_CTL Block Count Number maximum line count */
- if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
- crtc_state->hw.adjusted_mode.crtc_vblank_start <
- psr2_block_count_lines(intel_dp)) {
+ if (!wake_lines_fit_into_vblank(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, too short vblank time\n");
return false;
}
- if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
- !HAS_PSR_HW_TRACKING(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
- return false;
- }
- }
-
- if (!psr2_granularity_check(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
- goto unsupported;
- }
-
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
- goto unsupported;
+ return false;
}
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
+
+ if (psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay))
+ crtc_state->enable_psr2_su_region_et = true;
+
+ return true;
+}
+
+static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (HAS_PSR2_SEL_FETCH(dev_priv) &&
+ !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ !HAS_PSR_HW_TRACKING(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
+ goto unsupported;
+ }
+
+ if (!psr2_global_enabled(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
+ goto unsupported;
+ }
+
+ if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
+ goto unsupported;
+
+ if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
+ !intel_dp->psr.sink_panel_replay_su_support))
+ goto unsupported;
+
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Selective update not enabled because it would inhibit pipe CRC calculation\n");
+ goto unsupported;
+ }
+
+ if (!psr2_granularity_check(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Selective update not enabled, SU granularity not compatible\n");
+ goto unsupported;
+ }
+
return true;
unsupported:
@@ -1595,6 +1476,21 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
return true;
}
+static bool _panel_replay_compute_config(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!CAN_PANEL_REPLAY(intel_dp))
+ return false;
+
+ if (!panel_replay_global_enabled(intel_dp)) {
+ drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
+ return false;
+ }
+
+ return true;
+}
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -1620,18 +1516,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
}
/*
- * FIXME figure out what is wrong with PSR+bigjoiner and
+ * FIXME figure out what is wrong with PSR+joiner and
* fix it. Presumably something related to the fact that
* PSR is a transcoder level feature.
*/
- if (crtc_state->bigjoiner_pipes) {
+ if (crtc_state->joiner_pipes) {
drm_dbg_kms(&dev_priv->drm,
- "PSR disabled due to bigjoiner\n");
+ "PSR disabled due to joiner\n");
return;
}
- if (CAN_PANEL_REPLAY(intel_dp))
- crtc_state->has_panel_replay = true;
+ crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp);
crtc_state->has_psr = crtc_state->has_panel_replay ? true :
_psr_compute_config(intel_dp, crtc_state);
@@ -1639,7 +1534,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
if (!crtc_state->has_psr)
return;
- crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+ crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1672,20 +1567,24 @@ void intel_psr_get_config(struct intel_encoder *encoder,
pipe_config->has_psr = true;
}
- pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
+ pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
- if (!intel_dp->psr.psr2_enabled)
+ if (!intel_dp->psr.sel_update_enabled)
goto unlock;
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
if (val & PSR2_MAN_TRK_CTL_ENABLE)
pipe_config->enable_psr2_sel_fetch = true;
}
+ pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
+
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_EXITLINE(dev_priv, cpu_transcoder));
pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
}
unlock:
@@ -1699,7 +1598,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm,
transcoder_has_psr2(dev_priv, cpu_transcoder) &&
- intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
+ intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
@@ -1711,7 +1610,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
/* psr1, psr2 and panel-replay are mutually exclusive.*/
if (intel_dp->psr.panel_replay_enabled)
dg2_activate_panel_replay(intel_dp);
- else if (intel_dp->psr.psr2_enabled)
+ else if (intel_dp->psr.sel_update_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
@@ -1763,51 +1662,6 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
wa_16013835468_bit_get(intel_dp), 0);
}
-static void lnl_alpm_configure(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- struct intel_psr *psr = &intel_dp->psr;
- u32 alpm_ctl;
-
- if (DISPLAY_VER(dev_priv) < 20 || (!intel_dp->psr.psr2_enabled &&
- !intel_dp_is_edp(intel_dp)))
- return;
-
- /*
- * Panel Replay on eDP is always using ALPM aux less. I.e. no need to
- * check panel support at this point.
- */
- if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- alpm_ctl = ALPM_CTL_ALPM_ENABLE |
- ALPM_CTL_ALPM_AUX_LESS_ENABLE |
- ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS;
-
- intel_de_write(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
- PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
- PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
- PORT_ALPM_CTL_SILENCE_PERIOD(
- psr->alpm_parameters.silence_period_sym_clocks));
-
- intel_de_write(dev_priv, PORT_ALPM_LFPS_CTL(cpu_transcoder),
- PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
- PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- psr->alpm_parameters.lfps_half_cycle_num_of_syms) |
- PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- psr->alpm_parameters.lfps_half_cycle_num_of_syms));
- } else {
- alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(psr->alpm_parameters.fast_wake_lines);
- }
-
- alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(psr->alpm_parameters.check_entry_lines);
-
- intel_de_write(dev_priv, ALPM_CTL(cpu_transcoder), alpm_ctl);
-}
-
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1877,7 +1731,9 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* transcoder, EXITLINE will need to be unset when disabling PSR
*/
if (intel_dp->psr.dc3co_exitline)
- intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
+ intel_de_rmw(dev_priv,
+ TRANS_EXITLINE(dev_priv, cpu_transcoder),
+ EXITLINE_MASK,
intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
@@ -1886,7 +1742,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
IGNORE_PSR2_HW_TRACKING : 0);
if (intel_dp_is_edp(intel_dp))
- lnl_alpm_configure(intel_dp);
+ intel_alpm_configure(intel_dp, crtc_state);
/*
* Wa_16013835468
@@ -1894,7 +1750,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
wm_optimization_wa(intel_dp, crtc_state);
- if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.sel_update_enabled) {
if (DISPLAY_VER(dev_priv) == 9)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
@@ -1905,15 +1761,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* All supported adlp panels have 1-based X granularity, this may
* cause issues if non-supported panels are used.
*/
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv))
+ if (!intel_dp->psr.panel_replay_enabled &&
+ (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv)))
intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
+ if (!intel_dp->psr.panel_replay_enabled &&
+ IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
+ MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ 0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
@@ -1960,7 +1819,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
- intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
intel_dp->psr.busy_frontbuffer_bits = 0;
intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
@@ -1970,6 +1829,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.dc3co_exit_delay = val;
intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
@@ -1981,7 +1841,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
} else {
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
* Panel replay has to be enabled before link training: doing it
@@ -2008,7 +1868,8 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
if (!intel_dp->psr.active) {
if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv, cpu_transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
@@ -2021,10 +1882,11 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
if (intel_dp->psr.panel_replay_enabled) {
intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
- } else if (intel_dp->psr.psr2_enabled) {
+ } else if (intel_dp->psr.sel_update_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
- val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
+ val = intel_de_rmw(dev_priv,
+ EDP_PSR2_CTL(dev_priv, cpu_transcoder),
EDP_PSR2_ENABLE, 0);
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
@@ -2044,8 +1906,9 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
i915_reg_t psr_status;
u32 psr_status_mask;
- if (intel_dp->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS(cpu_transcoder);
+ if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
+ intel_dp->psr.panel_replay_enabled)) {
+ psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
psr_status = psr_status_reg(dev_priv, cpu_transcoder);
@@ -2072,7 +1935,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
else
drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.sel_update_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -2085,11 +1948,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
- if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
+ if (!intel_dp->psr.panel_replay_enabled &&
+ IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
+ MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
@@ -2101,26 +1965,29 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Panel Replay on eDP is always using ALPM aux less. */
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(dev_priv, ALPM_CTL(cpu_transcoder),
+ intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- intel_de_rmw(dev_priv, PORT_ALPM_CTL(cpu_transcoder),
+ intel_de_rmw(dev_priv,
+ PORT_ALPM_CTL(dev_priv, cpu_transcoder),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
}
/* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- intel_psr_get_enable_sink_offset(intel_dp), 0);
+ if (!intel_dp->psr.panel_replay_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
- if (!intel_dp->psr.panel_replay_enabled &&
- intel_dp->psr.psr2_enabled)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
+ if (intel_dp->psr.sel_update_enabled)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG, 0);
+ }
intel_dp->psr.enabled = false;
intel_dp->psr.panel_replay_enabled = false;
- intel_dp->psr.psr2_enabled = false;
+ intel_dp->psr.sel_update_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
+ intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
@@ -2244,7 +2111,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled)
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(cpu_transcoder),
+ PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
man_trk_ctl_single_full_frame_bit_get(dev_priv) |
@@ -2263,7 +2130,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
* but testing proved that it works for up display 13, for newer
* than that testing will be needed.
*/
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
@@ -2286,7 +2153,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
break;
}
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
crtc_state->psr2_man_track_ctl);
if (!crtc_state->enable_psr2_su_region_et)
@@ -2332,19 +2199,14 @@ exit:
crtc_state->psr2_man_track_ctl = val;
}
-static u32
-psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
- bool full_update, bool cursor_in_su_area)
+static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
+ bool full_update)
{
int width, height;
if (!crtc_state->enable_psr2_su_region_et || full_update)
return 0;
- if (!cursor_in_su_area)
- return PIPESRC_WIDTH(0) |
- PIPESRC_HEIGHT(drm_rect_height(&crtc_state->pipe_src));
-
width = drm_rect_width(&crtc_state->psr2_su_area);
height = drm_rect_height(&crtc_state->psr2_su_area);
@@ -2484,7 +2346,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
crtc_state->psr2_su_area.x1 = 0;
crtc_state->psr2_su_area.y1 = -1;
- crtc_state->psr2_su_area.x2 = INT_MAX;
+ crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
crtc_state->psr2_su_area.y2 = -1;
/*
@@ -2573,8 +2435,9 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
goto skip_sel_fetch_set_loop;
/* Wa_14014971492 */
- if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ if (!crtc_state->has_panel_replay &&
+ ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2652,8 +2515,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, full_update);
crtc_state->pipe_srcsz_early_tpt =
- psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update,
- cursor_in_su_area);
+ psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
return 0;
}
@@ -2683,12 +2545,15 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
* - PSR disabled in new state
* - All planes will go inactive
* - Changing between PSR versions
+ * - Region Early Transport changing
* - Display WA #1136: skl, bxt
*/
needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
needs_to_disable |= !new_crtc_state->has_psr;
needs_to_disable |= !new_crtc_state->active_planes;
- needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
+ needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
+ needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
+ psr->su_region_et_enabled;
needs_to_disable |= DISPLAY_VER(i915) < 11 &&
new_crtc_state->wm_level_disabled;
@@ -2761,7 +2626,7 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
return intel_de_wait_for_clear(dev_priv,
- EDP_PSR2_STATUS(cpu_transcoder),
+ EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
@@ -2781,6 +2646,13 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
EDP_PSR_STATUS_STATE_MASK, 50);
}
+static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+{
+ return intel_dp_is_edp(intel_dp) ?
+ _psr2_ready_for_pipe_update_locked(intel_dp) :
+ _psr1_ready_for_pipe_update_locked(intel_dp);
+}
+
/**
* intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
* @new_crtc_state: new CRTC state
@@ -2806,7 +2678,9 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
if (!intel_dp->psr.enabled)
continue;
- if (intel_dp->psr.psr2_enabled)
+ if (intel_dp->psr.panel_replay_enabled)
+ ret = _panel_replay_ready_for_pipe_update_locked(intel_dp);
+ else if (intel_dp->psr.sel_update_enabled)
ret = _psr2_ready_for_pipe_update_locked(intel_dp);
else
ret = _psr1_ready_for_pipe_update_locked(intel_dp);
@@ -2827,8 +2701,9 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
if (!intel_dp->psr.enabled)
return false;
- if (intel_dp->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS(cpu_transcoder);
+ if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
+ intel_dp->psr.panel_replay_enabled)) {
+ reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
reg = psr_status_reg(dev_priv, cpu_transcoder);
@@ -2914,10 +2789,14 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
- u32 old_mode;
+ const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
+ I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
+ u32 old_mode, old_disable_bits;
int ret;
- if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
+ if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
+ I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
+ I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
@@ -2928,6 +2807,10 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
return ret;
old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+ old_disable_bits = intel_dp->psr.debug &
+ (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
+ I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
+
intel_dp->psr.debug = val;
/*
@@ -2939,7 +2822,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
mutex_unlock(&intel_dp->psr.lock);
- if (old_mode != mode)
+ if (old_mode != mode || old_disable_bits != disable_bits)
ret = intel_psr_fastset_force(dev_priv);
return ret;
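Beyond accepting the two new disable bits, the fastset is now forced whenever either of them changes, not only on a mode change, so toggling them through debugfs takes effect on the next commit. A sketch of a value userspace might hand to this path (the flag names are the ones validated above; the composition itself is only an example):

/* Illustrative only: keep PSR enabled, opt out of SU-region early transport. */
u64 val = I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
/* passes the mask check above and lands in intel_dp->psr.debug */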
@@ -3000,15 +2883,20 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* Send one update otherwise lag is observed in screen */
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_de_write(dev_priv,
+ CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ 0);
return;
}
val = man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
man_trk_ctl_continuos_full_frame(dev_priv);
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ val);
+ intel_de_write(dev_priv,
+ CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
} else {
intel_psr_exit(intel_dp);
@@ -3068,7 +2956,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
+ if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
return;
@@ -3105,9 +2993,12 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* SU configuration in case update is sent for any reason after
* sff bit gets cleared by the HW on next vblank.
*/
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
val);
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_de_write(dev_priv,
+ CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
} else {
@@ -3219,9 +3110,6 @@ void intel_psr_init(struct intel_dp *intel_dp)
else
intel_dp->psr.source_support = true;
- /* Disable early transport for now */
- intel_dp->psr.debug |= I915_PSR_DEBUG_SU_REGION_ET_DISABLE;
-
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
@@ -3266,7 +3154,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
u8 val;
int r;
- if (!psr->psr2_enabled)
+ if (!psr->sel_update_enabled)
return;
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
@@ -3446,7 +3334,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
const char *status = "unknown";
u32 val, status_val;
- if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
+ intel_dp->psr.panel_replay_enabled)) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -3460,7 +3349,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3484,22 +3374,66 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
-static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+static void intel_psr_sink_capability(struct intel_dp *intel_dp,
+ struct seq_file *m)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
seq_printf(m, "Sink support: PSR = %s",
str_yes_no(psr->sink_support));
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
+ if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
+ seq_printf(m, " (Early Transport)");
+ seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
+ seq_printf(m, ", Panel Replay Selective Update = %s",
+ str_yes_no(psr->sink_panel_replay_su_support));
+ if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
+ seq_printf(m, " (Early Transport)");
+ seq_printf(m, "\n");
+}
+
+static void intel_psr_print_mode(struct intel_dp *intel_dp,
+ struct seq_file *m)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+ const char *status, *mode, *region_et;
+
+ if (psr->enabled)
+ status = " enabled";
+ else
+ status = "disabled";
+
+ if (psr->panel_replay_enabled && psr->sel_update_enabled)
+ mode = "Panel Replay Selective Update";
+ else if (psr->panel_replay_enabled)
+ mode = "Panel Replay";
+ else if (psr->sel_update_enabled)
+ mode = "PSR2";
+ else if (psr->enabled)
+ mode = "PSR1";
+ else
+ mode = "";
+
+ if (psr->su_region_et_enabled)
+ region_et = " (Early Transport)";
+ else
+ region_et = "";
+
+ seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
+}
+
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_psr *psr = &intel_dp->psr;
+ intel_wakeref_t wakeref;
+ bool enabled;
+ u32 val, psr2_ctl;
+
+ intel_psr_sink_capability(intel_dp, m);
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
@@ -3507,13 +3441,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
- if (psr->panel_replay_enabled)
- status = "Panel Replay Enabled";
- else if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
+ intel_psr_print_mode(intel_dp, m);
if (!psr->enabled) {
seq_printf(m, "PSR sink not reliable: %s\n",
@@ -3524,9 +3452,16 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (psr->panel_replay_enabled) {
val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+
+ if (intel_dp_is_edp(intel_dp))
+ psr2_ctl = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv,
+ cpu_transcoder));
+
enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
- } else if (psr->psr2_enabled) {
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
+ } else if (psr->sel_update_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv, cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
@@ -3534,6 +3469,9 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
}
seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
+ if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
+ seq_printf(m, "PSR2_CTL: 0x%08x\n",
+ psr2_ctl);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
psr->busy_frontbuffer_bits);
@@ -3551,7 +3489,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
}
- if (psr->psr2_enabled) {
+ if (psr->sel_update_enabled) {
u32 su_frames_val[3];
int frame;
@@ -3560,7 +3498,8 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
su_frames_val[frame / 3] = val;
}
@@ -3694,16 +3633,9 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"reserved",
"sink internal error",
};
- static const char * const panel_replay_status[] = {
- "Sink device frame is locked to the Source device",
- "Sink device is coasting, using the VTotal target",
- "Sink device is governing the frame rate (frame rate unlock is granted)",
- "Sink device in the process of re-locking with the Source device",
- };
const char *str;
int ret;
u8 status, error_status;
- u32 idx;
if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
seq_puts(m, "PSR/Panel-Replay Unsupported\n");
@@ -3717,16 +3649,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
if (ret)
return ret;
- str = "unknown";
- if (intel_dp->psr.panel_replay_enabled) {
- idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
- if (idx < ARRAY_SIZE(panel_replay_status))
- str = panel_replay_status[idx];
- } else if (intel_dp->psr.enabled) {
- idx = status & DP_PSR_SINK_STATE_MASK;
- if (idx < ARRAY_SIZE(sink_status))
- str = sink_status[idx];
- }
+ status &= DP_PSR_SINK_STATE_MASK;
+ if (status < ARRAY_SIZE(sink_status))
+ str = sink_status[status];
+ else
+ str = "unknown";
seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);