Diffstat (limited to 'drivers/gpu/drm/i915/display')
43 files changed, 1643 insertions, 857 deletions
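Among the hunks below, the intel_bw.c changes add skl_bw_calc_min_cdclk(), which derives a CDCLK floor from the DBuf bandwidth consumed per slice; the in-code comment notes that underruns were observed once the total bandwidth across slices exceeds 64 * CDCLK. The following is a minimal standalone sketch of that rule only, not the driver code: the type and function names, the slice/pipe counts, and the bandwidth units are invented for illustration.

    /*
     * Illustrative sketch of the "total DBuf bandwidth / 64" CDCLK floor.
     * All names and constants here are made up for the example; the real
     * logic lives in skl_bw_calc_min_cdclk() in the intel_bw.c hunks below.
     */
    #include <stddef.h>

    #define EX_NUM_SLICES 2   /* assumed DBuf slice count for the example */
    #define EX_NUM_PIPES  4   /* assumed pipe count for the example */

    /* Bandwidth each pipe pushes through each DBuf slice (example units). */
    struct ex_dbuf_bw {
            unsigned int used_bw[EX_NUM_SLICES];
    };

    /*
     * Sum the bandwidth consumed by all pipes over all slices and apply
     * the empirical rule from the patch: keep CDCLK >= total_bw / 64.
     */
    static unsigned int ex_min_cdclk_for_dbuf_bw(const struct ex_dbuf_bw bw[EX_NUM_PIPES])
    {
            unsigned int total_bw = 0;

            for (size_t pipe = 0; pipe < EX_NUM_PIPES; pipe++)
                    for (size_t slice = 0; slice < EX_NUM_SLICES; slice++)
                            total_bw += bw[pipe].used_bw[slice];

            return total_bw / 64;
    }

In the actual patch this value is stored in the bw state as min_cdclk and folded into intel_compute_min_cdclk() as an additional lower bound alongside the per-pipe minimums.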
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 4fec5bd64920..8c55f5bee9ab 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1469,8 +1469,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); if (gen11_dsi_is_periodic_cmd_mode(intel_dsi)) - pipe_config->hw.adjusted_mode.private_flags |= - I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; } static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, @@ -1558,10 +1557,6 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5; - /* We would not operate in periodic command mode */ - pipe_config->hw.adjusted_mode.private_flags &= - ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE; - /* * In case of TE GATE cmd mode, we * receive TE from the slave if @@ -1569,14 +1564,14 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, */ if (is_cmd_mode(intel_dsi)) { if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A))) - pipe_config->hw.adjusted_mode.private_flags |= + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0; else if (intel_dsi->ports == BIT(PORT_B)) - pipe_config->hw.adjusted_mode.private_flags |= + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1; else - pipe_config->hw.adjusted_mode.private_flags |= + pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE0; } @@ -1954,6 +1949,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) return; err: + drm_connector_cleanup(connector); drm_encoder_cleanup(&encoder->base); kfree(intel_dsi); kfree(intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index d043057d2fa0..630f49b7aa01 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -249,9 +249,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_wm_post = false; crtc_state->fifo_changed = false; crtc_state->preload_luts = false; + crtc_state->inherited = false; crtc_state->wm.need_postvbl_update = false; crtc_state->fb_bits = 0; crtc_state->update_planes = 0; + crtc_state->dsb = NULL; return &crtc_state->uapi; } @@ -292,6 +294,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, { struct intel_crtc_state *crtc_state = to_intel_crtc_state(state); + drm_WARN_ON(crtc->dev, crtc_state->dsb); + __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); kfree(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 839124647202..6593e2c38043 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -479,7 +479,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct drm_display_mode *panel_fixed_mode; int index; - index = i915_modparams.vbt_sdvo_panel_type; + index = dev_priv->params.vbt_sdvo_panel_type; if (index == -2) { drm_dbg_kms(&dev_priv->drm, "Ignore SDVO panel mode from BIOS VBT tables.\n"); @@ -829,9 +829,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) u8 vswing; /* Don't read from VBT if module parameter has valid value*/ - if (i915_modparams.edp_vswing) { + if (dev_priv->params.edp_vswing) { dev_priv->vbt.edp.low_vswing = - i915_modparams.edp_vswing == 1; + 
dev_priv->params.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; dev_priv->vbt.edp.low_vswing = vswing == 0; @@ -1619,30 +1619,18 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) return 0; } -static enum port dvo_port_to_port(u8 dvo_port) +static enum port __dvo_port_to_port(int n_ports, int n_dvo, + const int port_mapping[][3], u8 dvo_port) { - /* - * Each DDI port can have more than one value on the "DVO Port" field, - * so look for all the possible values for each port. - */ - static const int dvo_ports[][3] = { - [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, - [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, - [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1}, - [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1}, - [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, - [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1}, - [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1}, - }; enum port port; int i; - for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) { - for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) { - if (dvo_ports[port][i] == -1) + for (port = PORT_A; port < n_ports; port++) { + for (i = 0; i < n_dvo; i++) { + if (port_mapping[port][i] == -1) break; - if (dvo_port == dvo_ports[port][i]) + if (dvo_port == port_mapping[port][i]) return port; } } @@ -1650,6 +1638,48 @@ static enum port dvo_port_to_port(u8 dvo_port) return PORT_NONE; } +static enum port dvo_port_to_port(struct drm_i915_private *dev_priv, + u8 dvo_port) +{ + /* + * Each DDI port can have more than one value on the "DVO Port" field, + * so look for all the possible values for each port. + */ + static const int port_mapping[][3] = { + [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, + [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, + [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, + [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, + [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT }, + [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 }, + [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 }, + }; + /* + * Bspec lists the ports as A, B, C, D - however internally in our + * driver we keep them as PORT_A, PORT_B, PORT_D and PORT_E so the + * registers in Display Engine match the right offsets. Apply the + * mapping here to translate from VBT to internal convention. + */ + static const int rkl_port_mapping[][3] = { + [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 }, + [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 }, + [PORT_C] = { -1 }, + [PORT_D] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 }, + [PORT_E] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 }, + }; + + if (IS_ROCKETLAKE(dev_priv)) + return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping), + ARRAY_SIZE(rkl_port_mapping[0]), + rkl_port_mapping, + dvo_port); + else + return __dvo_port_to_port(ARRAY_SIZE(port_mapping), + ARRAY_SIZE(port_mapping[0]), + port_mapping, + dvo_port); +} + static void parse_ddi_port(struct drm_i915_private *dev_priv, struct display_device_data *devdata, u8 bdb_version) @@ -1659,7 +1689,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; enum port port; - port = dvo_port_to_port(child->dvo_port); + port = dvo_port_to_port(dev_priv, child->dvo_port); if (port == PORT_NONE) return; @@ -2603,10 +2633,10 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, aux_ch = AUX_CH_B; break; case DP_AUX_C: - aux_ch = AUX_CH_C; + aux_ch = IS_ROCKETLAKE(dev_priv) ? 
AUX_CH_D : AUX_CH_C; break; case DP_AUX_D: - aux_ch = AUX_CH_D; + aux_ch = IS_ROCKETLAKE(dev_priv) ? AUX_CH_E : AUX_CH_D; break; case DP_AUX_E: aux_ch = AUX_CH_E; diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index fef04e2d954e..bd060404d249 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -5,12 +5,12 @@ #include <drm/drm_atomic_state_helper.h> +#include "intel_atomic.h" #include "intel_bw.h" +#include "intel_cdclk.h" #include "intel_display_types.h" -#include "intel_sideband.h" -#include "intel_atomic.h" #include "intel_pm.h" - +#include "intel_sideband.h" /* Parameters for Qclk Geyserville (QGV) */ struct intel_qgv_point { @@ -199,6 +199,12 @@ static const struct intel_sa_info tgl_sa_info = { .displayrtids = 256, }; +static const struct intel_sa_info rkl_sa_info = { + .deburst = 16, + .deprogbwlimit = 20, /* GB/s */ + .displayrtids = 128, +}; + static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; @@ -309,7 +315,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_GEN(dev_priv, 12)) + if (IS_ROCKETLAKE(dev_priv)) + icl_get_bw_info(dev_priv, &rkl_sa_info); + else if (IS_GEN(dev_priv, 12)) icl_get_bw_info(dev_priv, &tgl_sa_info); else if (IS_GEN(dev_priv, 11)) icl_get_bw_info(dev_priv, &icl_sa_info); @@ -420,6 +428,141 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state) return to_intel_bw_state(bw_state); } +int skl_bw_calc_min_cdclk(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_bw_state *new_bw_state = NULL; + struct intel_bw_state *old_bw_state = NULL; + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int max_bw = 0; + int slice_id; + enum pipe pipe; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + enum plane_id plane_id; + struct intel_dbuf_bw *crtc_bw; + + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + old_bw_state = intel_atomic_get_old_bw_state(state); + + crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe]; + + memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw)); + + if (!crtc_state->hw.active) + continue; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_ddb_entry *plane_alloc = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + const struct skl_ddb_entry *uv_plane_alloc = + &crtc_state->wm.skl.plane_ddb_uv[plane_id]; + unsigned int data_rate = crtc_state->data_rate[plane_id]; + unsigned int dbuf_mask = 0; + + dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc); + dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc); + + /* + * FIXME: To calculate that more properly we probably + * need to to split per plane data_rate into data_rate_y + * and data_rate_uv for multiplanar formats in order not + * to get accounted those twice if they happen to reside + * on different slices. + * However for pre-icl this would work anyway because + * we have only single slice and for icl+ uv plane has + * non-zero data rate. + * So in worst case those calculation are a bit + * pessimistic, which shouldn't pose any significant + * problem anyway. 
+ */ + for_each_dbuf_slice_in_mask(slice_id, dbuf_mask) + crtc_bw->used_bw[slice_id] += data_rate; + } + } + + if (!old_bw_state) + return 0; + + for_each_pipe(dev_priv, pipe) { + struct intel_dbuf_bw *crtc_bw; + + crtc_bw = &new_bw_state->dbuf_bw[pipe]; + + for_each_dbuf_slice(slice_id) { + /* + * Current experimental observations show that contrary + * to BSpec we get underruns once we exceed 64 * CDCLK + * for slices in total. + * As a temporary measure in order not to keep CDCLK + * bumped up all the time we calculate CDCLK according + * to this formula for overall bw consumed by slices. + */ + max_bw += crtc_bw->used_bw[slice_id]; + } + } + + new_bw_state->min_cdclk = max_bw / 64; + + if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) { + int ret = intel_atomic_lock_global_state(&new_bw_state->base); + + if (ret) + return ret; + } + + return 0; +} + +int intel_bw_calc_min_cdclk(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_bw_state *new_bw_state = NULL; + struct intel_bw_state *old_bw_state = NULL; + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int min_cdclk = 0; + enum pipe pipe; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + old_bw_state = intel_atomic_get_old_bw_state(state); + } + + if (!old_bw_state) + return 0; + + for_each_pipe(dev_priv, pipe) { + struct intel_cdclk_state *cdclk_state; + + cdclk_state = intel_atomic_get_new_cdclk_state(state); + if (!cdclk_state) + return 0; + + min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); + } + + new_bw_state->min_cdclk = min_cdclk; + + if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) { + int ret = intel_atomic_lock_global_state(&new_bw_state->base); + + if (ret) + return ret; + } + + return 0; +} + int intel_bw_atomic_check(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index bbcaaa73ec1b..46c6eecbd917 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -9,14 +9,20 @@ #include <drm/drm_atomic.h> #include "intel_display.h" +#include "intel_display_power.h" #include "intel_global_state.h" struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; +struct intel_dbuf_bw { + int used_bw[I915_MAX_DBUF_SLICES]; +}; + struct intel_bw_state { struct intel_global_state base; + struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES]; /* * Contains a bit mask, used to determine, whether correspondent @@ -36,6 +42,8 @@ struct intel_bw_state { /* bitmask of active pipes */ u8 active_pipes; + + int min_cdclk; }; #define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base) @@ -56,5 +64,7 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state); int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, u32 points_mask); +int intel_bw_calc_min_cdclk(struct intel_atomic_state *state); +int skl_bw_calc_min_cdclk(struct intel_atomic_state *state); #endif /* __INTEL_BW_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 979a0241fdcb..45f7f33d1144 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -21,7 +21,10 @@ * 
DEALINGS IN THE SOFTWARE. */ +#include <linux/time.h> + #include "intel_atomic.h" +#include "intel_bw.h" #include "intel_cdclk.h" #include "intel_display_types.h" #include "intel_sideband.h" @@ -2094,6 +2097,7 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) { struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_bw_state *bw_state = NULL; struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int min_cdclk, i; @@ -2106,6 +2110,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) if (min_cdclk < 0) return min_cdclk; + bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(bw_state)) + return PTR_ERR(bw_state); + if (cdclk_state->min_cdclk[i] == min_cdclk) continue; @@ -2117,9 +2125,15 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) } min_cdclk = cdclk_state->force_min_cdclk; - for_each_pipe(dev_priv, pipe) + for_each_pipe(dev_priv, pipe) { min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); + if (!bw_state) + continue; + + min_cdclk = max(bw_state->min_cdclk, min_cdclk); + } + return min_cdclk; } @@ -2700,29 +2714,59 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv) CCK_DISPLAY_REF_CLOCK_CONTROL); } -static int g4x_hrawclk(struct drm_i915_private *dev_priv) +static int i9xx_hrawclk(struct drm_i915_private *dev_priv) { u32 clkcfg; - /* hrawclock is 1/4 the FSB frequency */ - clkcfg = intel_de_read(dev_priv, CLKCFG); - switch (clkcfg & CLKCFG_FSB_MASK) { - case CLKCFG_FSB_400: - return 100000; - case CLKCFG_FSB_533: - return 133333; - case CLKCFG_FSB_667: - return 166667; - case CLKCFG_FSB_800: - return 200000; - case CLKCFG_FSB_1067: - case CLKCFG_FSB_1067_ALT: - return 266667; - case CLKCFG_FSB_1333: - case CLKCFG_FSB_1333_ALT: - return 333333; - default: - return 133333; + /* + * hrawclock is 1/4 the FSB frequency + * + * Note that this only reads the state of the FSB + * straps, not the actual FSB frequency. Some BIOSen + * let you configure each independently. 
Ideally we'd + * read out the actual FSB frequency but sadly we + * don't know which registers have that information, + * and all the relevant docs have gone to bit heaven :( + */ + clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK; + + if (IS_MOBILE(dev_priv)) { + switch (clkcfg) { + case CLKCFG_FSB_400: + return 100000; + case CLKCFG_FSB_533: + return 133333; + case CLKCFG_FSB_667: + return 166667; + case CLKCFG_FSB_800: + return 200000; + case CLKCFG_FSB_1067: + return 266667; + case CLKCFG_FSB_1333: + return 333333; + default: + MISSING_CASE(clkcfg); + return 133333; + } + } else { + switch (clkcfg) { + case CLKCFG_FSB_400_ALT: + return 100000; + case CLKCFG_FSB_533: + return 133333; + case CLKCFG_FSB_667: + return 166667; + case CLKCFG_FSB_800: + return 200000; + case CLKCFG_FSB_1067_ALT: + return 266667; + case CLKCFG_FSB_1333_ALT: + return 333333; + case CLKCFG_FSB_1600_ALT: + return 400000; + default: + return 133333; + } } } @@ -2743,8 +2787,8 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) freq = pch_rawclk(dev_priv); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) freq = vlv_hrawclk(dev_priv); - else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv)) - freq = g4x_hrawclk(dev_priv); + else if (INTEL_GEN(dev_priv) >= 3) + freq = i9xx_hrawclk(dev_priv); else /* no rawclk on other platforms, or no need to know it */ return 0; @@ -2760,25 +2804,30 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (INTEL_GEN(dev_priv) >= 12) { dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_ELKHARTLAKE(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = icl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = cnl_calc_voltage_level; dev_priv->cdclk.table = cnl_cdclk_table; } else if (IS_GEN9_LP(dev_priv)) { + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = bxt_calc_voltage_level; @@ -2787,18 +2836,23 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) else dev_priv->cdclk.table = bxt_cdclk_table; } else if (IS_GEN9_BC(dev_priv)) { + dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; dev_priv->display.set_cdclk = skl_set_cdclk; dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; } else if (IS_BROADWELL(dev_priv)) { + dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; dev_priv->display.set_cdclk = bdw_set_cdclk; dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk; } else if 
(IS_CHERRYVIEW(dev_priv)) { + dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; dev_priv->display.set_cdclk = chv_set_cdclk; dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; } else if (IS_VALLEYVIEW(dev_priv)) { + dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; dev_priv->display.set_cdclk = vlv_set_cdclk; dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; } else { + dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk; } diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 98ece9cd7cdd..945bb03bdd4d 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -714,16 +714,16 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); } -static void ivb_load_lut_ext_max(struct intel_crtc *crtc) +static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; /* Program the max register to clamp values > 1.0. */ - intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); - intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); - intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); + intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); + intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); + intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); /* * Program the gc max 2 register to clamp values > 1.0. 
@@ -731,15 +731,13 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc) * from 3.0 to 7.0 */ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0), + intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16); - intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1), + intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16); - intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2), + intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16); } - - intel_dsb_put(dsb); } static void ivb_load_luts(const struct intel_crtc_state *crtc_state) @@ -753,7 +751,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state) } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); - ivb_load_lut_ext_max(crtc); + ivb_load_lut_ext_max(crtc_state); ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); } else { @@ -761,7 +759,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state) ivb_load_lut_10(crtc, blob, PAL_PREC_INDEX_VALUE(0)); - ivb_load_lut_ext_max(crtc); + ivb_load_lut_ext_max(crtc_state); } } @@ -776,7 +774,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); - ivb_load_lut_ext_max(crtc); + ivb_load_lut_ext_max(crtc_state); bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(512)); } else { @@ -784,7 +782,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) bdw_load_lut_10(crtc, blob, PAL_PREC_INDEX_VALUE(0)); - ivb_load_lut_ext_max(crtc); + ivb_load_lut_ext_max(crtc_state); } } @@ -877,7 +875,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state) ilk_load_lut_8(crtc, gamma_lut); } else { bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); - ivb_load_lut_ext_max(crtc); + ivb_load_lut_ext_max(crtc_state); } } @@ -900,14 +898,12 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state, const struct drm_color_lut *color) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */ - intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red); - intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green); - intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue); - intel_dsb_put(dsb); + intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red); + intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green); + intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue); } static void @@ -916,7 +912,6 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_property_blob *blob = crtc_state->hw.gamma_lut; const struct drm_color_lut *lut = blob->data; - struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; int i; @@ -927,19 +922,17 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) * 9 entries, corresponding to values 0, 1/(8 * 128 * 256), * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256). 
*/ - intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe), + intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); for (i = 0; i < 9; i++) { const struct drm_color_lut *entry = &lut[i]; - intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe), + intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), ilk_lut_12p4_ldw(entry)); - intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe), + intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe), ilk_lut_12p4_udw(entry)); } - - intel_dsb_put(dsb); } static void @@ -949,7 +942,6 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *blob = crtc_state->hw.gamma_lut; const struct drm_color_lut *lut = blob->data; const struct drm_color_lut *entry; - struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; int i; @@ -963,12 +955,13 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1], * seg2[0] being unused by the hardware. */ - intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); + intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); for (i = 1; i < 257; i++) { entry = &lut[i * 8]; - intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); - intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); } @@ -986,24 +979,22 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) */ for (i = 0; i < 256; i++) { entry = &lut[i * 8 * 128]; - intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); - intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); } /* The last entry in the LUT is to be programmed in GCMAX */ entry = &lut[256 * 8 * 128]; icl_load_gcmax(crtc_state, entry); - ivb_load_lut_ext_max(crtc); - intel_dsb_put(dsb); + ivb_load_lut_ext_max(crtc_state); } static void icl_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_dsb *dsb = intel_dsb_get(crtc); if (crtc_state->hw.degamma_lut) glk_load_degamma_lut(crtc_state); @@ -1018,11 +1009,10 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) break; default: bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); - ivb_load_lut_ext_max(crtc); + ivb_load_lut_ext_max(crtc_state); } - intel_dsb_commit(dsb); - intel_dsb_put(dsb); + intel_dsb_commit(crtc_state); } static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 9ff05ec12115..77b04bb3ec62 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -181,11 +181,25 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, CHICKEN_MISC_2, val); } +static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy) +{ + /* + * Some platforms only expect PHY_MISC to be programmed for PHY-A and + * PHY-B and may not even 
have instances of the register for the + * other combo PHY's. + */ + if (IS_ELKHARTLAKE(i915) || + IS_ROCKETLAKE(i915)) + return phy < PHY_C; + + return true; +} + static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, enum phy phy) { /* The PHY C added by EHL has no PHY_MISC register */ - if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + if (!has_phy_misc(dev_priv, phy)) return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT; else return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) & @@ -220,6 +234,27 @@ static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915) return false; } +static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy) +{ + /* + * Certain PHYs are connected to compensation resistors and act + * as masters to other PHYs. + * + * ICL,TGL: + * A(master) -> B(slave), C(slave) + * RKL: + * A(master) -> B(slave) + * C(master) -> D(slave) + * + * We must set the IREFGEN bit for any PHY acting as a master + * to another PHY. + */ + if (IS_ROCKETLAKE(dev_priv) && phy == PHY_C) + return true; + + return phy == PHY_A; +} + static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, enum phy phy) { @@ -231,7 +266,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, ret = cnl_verify_procmon_ref_values(dev_priv, phy); - if (phy == PHY_A) { + if (phy_is_master(dev_priv, phy)) { ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), IREFGEN, IREFGEN); @@ -317,12 +352,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) continue; } - /* - * Although EHL adds a combo PHY C, there's no PHY_MISC - * register for it and no need to program the - * DE_IO_COMP_PWR_DOWN setting on PHY C. - */ - if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; /* @@ -347,7 +377,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) skip_phy_misc: cnl_set_procmon_ref_values(dev_priv, phy); - if (phy == PHY_A) { + if (phy_is_master(dev_priv, phy)) { val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy)); val |= IREFGEN; intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val); @@ -376,12 +406,7 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) "Combo PHY %c HW state changed unexpectedly\n", phy_name(phy)); - /* - * Although EHL adds a combo PHY C, there's no PHY_MISC - * register for it and no need to program the - * DE_IO_COMP_PWR_DOWN setting on PHY C. 
- */ - if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 2f5b9a4baafd..5b4510ce5693 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -833,7 +833,7 @@ intel_crt_detect(struct drm_connector *connector, connector->base.id, connector->name, force); - if (i915_modparams.load_detect_test) { + if (dev_priv->params.load_detect_test) { wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); goto load_detect; @@ -889,7 +889,7 @@ load_detect: else if (INTEL_GEN(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); - else if (i915_modparams.load_detect_test) + else if (dev_priv->params.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c index 3112572cfb7d..f22a7645c249 100644 --- a/drivers/gpu/drm/i915/display/intel_csr.c +++ b/drivers/gpu/drm/i915/display/intel_csr.c @@ -40,6 +40,10 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE +#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin" +#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1) +MODULE_FIRMWARE(RKL_CSR_PATH); + #define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin" #define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6) #define TGL_CSR_MAX_FW_SIZE 0x6000 @@ -682,7 +686,11 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) */ intel_csr_runtime_pm_get(dev_priv); - if (INTEL_GEN(dev_priv) >= 12) { + if (IS_ROCKETLAKE(dev_priv)) { + csr->fw_path = RKL_CSR_PATH; + csr->required_version = RKL_CSR_VERSION_REQUIRED; + csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; + } else if (INTEL_GEN(dev_priv) >= 12) { csr->fw_path = TGL_CSR_PATH; csr->required_version = TGL_CSR_VERSION_REQUIRED; /* Allow to load fw via parameter using the last known size */ @@ -699,7 +707,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) csr->fw_path = GLK_CSR_PATH; csr->required_version = GLK_CSR_VERSION_REQUIRED; csr->max_fw_size = GLK_CSR_MAX_FW_SIZE; - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { + } else if (IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv) || + IS_COMETLAKE(dev_priv)) { csr->fw_path = KBL_CSR_PATH; csr->required_version = KBL_CSR_VERSION_REQUIRED; csr->max_fw_size = KBL_CSR_MAX_FW_SIZE; @@ -713,15 +723,15 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) csr->max_fw_size = BXT_CSR_MAX_FW_SIZE; } - if (i915_modparams.dmc_firmware_path) { - if (strlen(i915_modparams.dmc_firmware_path) == 0) { + if (dev_priv->params.dmc_firmware_path) { + if (strlen(dev_priv->params.dmc_firmware_path) == 0) { csr->fw_path = NULL; drm_info(&dev_priv->drm, "Disabling CSR firmware and runtime PM\n"); return; } - csr->fw_path = i915_modparams.dmc_firmware_path; + csr->fw_path = dev_priv->params.dmc_firmware_path; /* Bypass version check for firmware override. 
*/ csr->required_version = 0; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 0575a1eea2a1..025d4052f6f8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -639,11 +639,25 @@ struct tgl_dkl_phy_ddi_buf_trans { static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ - { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */ - { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */ + { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ + { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ + { 0x0, 0x0, 0x18 }, /* 0 3 400mV 9.5 dB */ + { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ + { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ + { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ + { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ + { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ + { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ +}; + +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_dp_ddi_trans_hbr2[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ + { 0x5, 0x0, 0x05 }, /* 0 1 400mV 3.5 dB */ + { 0x2, 0x0, 0x0B }, /* 0 2 400mV 6 dB */ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ - { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */ + { 0x2, 0x0, 0x08 }, /* 1 1 600mV 3.5 dB */ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ @@ -722,10 +736,14 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) { - if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) { + if (IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv) || + IS_CML_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); return kbl_y_ddi_translations_dp; - } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { + } else if (IS_KBL_ULT(dev_priv) || + IS_CFL_ULT(dev_priv) || + IS_CML_ULT(dev_priv)) { *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp); return kbl_u_ddi_translations_dp; } else { @@ -738,12 +756,16 @@ static const struct ddi_buf_trans * skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { if (dev_priv->vbt.edp.low_vswing) { - if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv)) { + if (IS_SKL_ULX(dev_priv) || + IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv) || + IS_CML_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); return skl_y_ddi_translations_edp; - } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || - IS_CFL_ULT(dev_priv)) { + } else if (IS_SKL_ULT(dev_priv) || + IS_KBL_ULT(dev_priv) || + IS_CFL_ULT(dev_priv) || + IS_CML_ULT(dev_priv)) { *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); return skl_u_ddi_translations_edp; } else { @@ -752,7 +774,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } } - if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + if (IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv) || + IS_COMETLAKE(dev_priv)) return kbl_get_buf_trans_dp(dev_priv, n_entries); else return skl_get_buf_trans_dp(dev_priv, n_entries); @@ -761,8 +785,10 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) { - if (IS_SKL_ULX(dev_priv) || 
IS_KBL_ULX(dev_priv) || - IS_CFL_ULX(dev_priv)) { + if (IS_SKL_ULX(dev_priv) || + IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv) || + IS_CML_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); return skl_y_ddi_translations_hdmi; } else { @@ -784,7 +810,9 @@ static const struct ddi_buf_trans * intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv, enum port port, int *n_entries) { - if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { + if (IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv) || + IS_COMETLAKE(dev_priv)) { const struct ddi_buf_trans *ddi_translations = kbl_get_buf_trans_dp(dev_priv, n_entries); *n_entries = skl_buf_trans_num_entries(port, *n_entries); @@ -1014,6 +1042,22 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, return tgl_combo_phy_ddi_translations_dp_hbr; } +static const struct tgl_dkl_phy_ddi_buf_trans * +tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, + int *n_entries) +{ + if (type == INTEL_OUTPUT_HDMI) { + *n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); + return tgl_dkl_phy_hdmi_ddi_trans; + } else if (rate > 270000) { + *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans_hbr2); + return tgl_dkl_phy_dp_ddi_trans_hbr2; + } + + *n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); + return tgl_dkl_phy_dp_ddi_trans; +} + static int intel_ddi_hdmi_level(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -1025,7 +1069,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder) tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else - n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); + tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, + &n_entries); default_entry = n_entries - 1; } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) @@ -1608,7 +1653,6 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 ctl; if (INTEL_GEN(dev_priv) >= 11) { enum transcoder master_transcoder = crtc_state->master_transcoder; @@ -1626,10 +1670,9 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2); } - ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) - ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), + intel_ddi_transcoder_func_reg_val_get(encoder, + crtc_state)); } /* @@ -2095,10 +2138,10 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, ddi_translations[level].deemphasis); } -u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) +static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp) { + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = encoder->port; enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; @@ -2108,7 +2151,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) tgl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else - n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); + 
tgl_get_dkl_buf_trans(dev_priv, encoder->type, + intel_dp->link_rate, &n_entries); } else if (INTEL_GEN(dev_priv) == 11) { if (IS_ELKHARTLAKE(dev_priv)) ehl_get_combo_buf_trans(dev_priv, encoder->type, @@ -2151,19 +2195,9 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) * used on all DDI platforms. Should that change we need to * rethink this code. */ -u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing) +static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: - return DP_TRAIN_PRE_EMPH_LEVEL_3; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: - return DP_TRAIN_PRE_EMPH_LEVEL_1; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: - default: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - } + return DP_TRAIN_PRE_EMPH_LEVEL_3; } static void cnl_ddi_vswing_program(struct intel_encoder *encoder, @@ -2585,15 +2619,17 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; + int rate = 0; if (type == INTEL_OUTPUT_HDMI) { - n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); - ddi_translations = tgl_dkl_phy_hdmi_ddi_trans; - } else { - n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); - ddi_translations = tgl_dkl_phy_dp_ddi_trans; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + rate = intel_dp->link_rate; } + ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate, + &n_entries); + if (level >= n_entries) level = n_entries - 1; @@ -3344,11 +3380,10 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_hdmi *intel_hdmi = &dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int level = intel_ddi_hdmi_level(encoder); - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_clk_select(encoder, crtc_state); @@ -3375,9 +3410,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, intel_ddi_enable_pipe_clock(encoder, crtc_state); - intel_dig_port->set_infoframes(encoder, - crtc_state->has_infoframe, - crtc_state, conn_state); + dig_port->set_infoframes(encoder, + crtc_state->has_infoframe, + crtc_state, conn_state); } static void intel_ddi_pre_enable(struct intel_atomic_state *state, @@ -4155,11 +4190,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; - if (INTEL_GEN(dev_priv) >= 12) { - intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder); - intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder); - } - intel_dsc_get_config(encoder, pipe_config); temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); @@ -4261,6 +4291,16 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder transcoder = + intel_dp_mst_is_slave_trans(pipe_config) ? 
+ pipe_config->mst_master_transcoder : + pipe_config->cpu_transcoder; + + intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder); + intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder); + } + pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); @@ -4523,6 +4563,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) else intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels; + intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max; + intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max; + if (INTEL_GEN(dev_priv) < 12) { intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index fbdf8ddde486..077e9dbbe367 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -42,9 +42,6 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); u32 bxt_signal_levels(struct intel_dp *intel_dp); u32 ddi_signal_levels(struct intel_dp *intel_dp); -u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); -u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, - u8 voltage_swing); int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 26996e1839e2..84e2a17b5ecb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -35,6 +35,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> @@ -4823,11 +4824,18 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) { - if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) + switch (plane_state->hw.color_encoding) { + case DRM_COLOR_YCBCR_BT709: plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; - else - plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709; - + break; + case DRM_COLOR_YCBCR_BT2020: + plane_color_ctl |= + PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020; + break; + default: + plane_color_ctl |= + PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601; + } if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } else if (fb->format->is_yuv) { @@ -4890,7 +4898,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) int ret; /* reset doesn't touch the display */ - if (!i915_modparams.force_reset_modeset_test && + if (!dev_priv->params.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; @@ -6435,8 +6443,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s * We can't read out IPS on broadwell, assume the worst and * forcibly enable IPS on the first fastset. 
*/ - if (new_crtc_state->update_pipe && - old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED) + if (new_crtc_state->update_pipe && old_crtc_state->inherited) return true; return !old_crtc_state->ips_enabled; @@ -7223,30 +7230,33 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; - - if (IS_ELKHARTLAKE(dev_priv)) + else if (IS_ROCKETLAKE(dev_priv)) + return phy <= PHY_D; + else if (IS_ELKHARTLAKE(dev_priv)) return phy <= PHY_C; - - if (INTEL_GEN(dev_priv) >= 11) + else if (INTEL_GEN(dev_priv) >= 11) return phy <= PHY_B; - - return false; + else + return false; } bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { - if (INTEL_GEN(dev_priv) >= 12) + if (IS_ROCKETLAKE(dev_priv)) + return false; + else if (INTEL_GEN(dev_priv) >= 12) return phy >= PHY_D && phy <= PHY_I; - - if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) + else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) return phy >= PHY_C && phy <= PHY_F; - - return false; + else + return false; } enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) { - if (IS_ELKHARTLAKE(i915) && port == PORT_D) + if (IS_ROCKETLAKE(i915) && port >= PORT_D) + return (enum phy)port - 1; + else if (IS_ELKHARTLAKE(i915) && port == PORT_D) return PHY_A; return (enum phy)port; @@ -7517,6 +7527,10 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); + + /* prevents spurious underruns */ + if (IS_GEN(dev_priv, 2)) + intel_wait_for_vblank(dev_priv, pipe); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) @@ -7590,6 +7604,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, to_intel_bw_state(dev_priv->bw_obj.state); struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(dev_priv->cdclk.obj.state); + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(dev_priv->dbuf.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); enum intel_display_power_domain domain; @@ -7663,6 +7679,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, cdclk_state->min_voltage_level[pipe] = 0; cdclk_state->active_pipes &= ~BIT(pipe); + dbuf_state->active_pipes &= ~BIT(pipe); + bw_state->data_rate[pipe] = 0; bw_state->num_active_planes[pipe] = 0; } @@ -7880,7 +7898,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) if (!hsw_crtc_supports_ips(crtc)) return false; - if (!i915_modparams.enable_ips) + if (!dev_priv->params.enable_ips) return false; if (crtc_state->pipe_bpp > 24) @@ -8151,8 +8169,8 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { - if (i915_modparams.panel_use_ssc >= 0) - return i915_modparams.panel_use_ssc != 0; + if (dev_priv->params.panel_use_ssc >= 0) + return dev_priv->params.panel_use_ssc != 0; return dev_priv->vbt.lvds_use_ssc && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } @@ -8897,7 +8915,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; - mode->vrefresh = drm_mode_vrefresh(mode); drm_mode_set_name(mode); } @@ -10894,7 +10911,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; - 
unsigned long panel_transcoder_mask = 0; + unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); unsigned long enabled_panel_transcoders = 0; enum transcoder panel_transcoder; intel_wakeref_t wf; @@ -10904,9 +10921,6 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); - if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP)) - panel_transcoder_mask |= BIT(TRANSCODER_EDP); - /* * The pipe->transcoder mapping is fixed with the exception of the eDP * and DSI transcoders handled below. @@ -10917,9 +10931,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, * XXX: Do intel_display_power_get_if_enabled before reading this (for * consistency and less surprising code; it's in always on power). */ - for_each_set_bit(panel_transcoder, - &panel_transcoder_mask, - ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) { + for_each_cpu_transcoder_masked(dev_priv, panel_transcoder, + panel_transcoder_mask) { bool force_thru = false; enum pipe trans_pipe; @@ -12511,7 +12524,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) continue; for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { - if (!icl_is_nv12_y_plane(linked->id)) + if (!icl_is_nv12_y_plane(dev_priv, linked->id)) continue; if (crtc_state->active_planes & BIT(linked->id)) @@ -12557,6 +12570,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) plane_state->cus_ctl |= PLANE_CUS_PLANE_7; else if (linked->id == PLANE_SPRITE4) plane_state->cus_ctl |= PLANE_CUS_PLANE_6; + else if (linked->id == PLANE_SPRITE3) + plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL; + else if (linked->id == PLANE_SPRITE2) + plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL; else MISSING_CASE(linked->id); } @@ -12580,12 +12597,15 @@ static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int linetime_wm; if (!crtc_state->hw.enable) return 0; - return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - adjusted_mode->crtc_clock); + linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, + adjusted_mode->crtc_clock); + + return min(linetime_wm, 0x1ff); } static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, @@ -12593,12 +12613,15 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int linetime_wm; if (!crtc_state->hw.enable) return 0; - return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - cdclk_state->logical.cdclk); + linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, + cdclk_state->logical.cdclk); + + return min(linetime_wm, 0x1ff); } static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) @@ -12607,7 +12630,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - u16 linetime_wm; + int linetime_wm; if (!crtc_state->hw.enable) return 0; @@ -12619,7 +12642,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled) linetime_wm /= 2; - return linetime_wm; + return min(linetime_wm, 0x1ff); } static int hsw_compute_linetime_wm(struct intel_atomic_state *state, @@ -13582,8 +13605,8 @@ pipe_config_mismatch(bool fastset, const struct 
intel_crtc *crtc, static bool fastboot_enabled(struct drm_i915_private *dev_priv) { - if (i915_modparams.fastboot != -1) - return i915_modparams.fastboot; + if (dev_priv->params.fastboot != -1) + return dev_priv->params.fastboot; /* Enable fastboot by default on Skylake and newer */ if (INTEL_GEN(dev_priv) >= 9) @@ -13607,8 +13630,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, bool ret = true; u32 bp_gamma = 0; bool fixup_inherited = fastset && - (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) && - !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED); + current_config->inherited && !pipe_config->inherited; if (fixup_inherited && !fastboot_enabled(dev_priv)) { drm_dbg_kms(&dev_priv->drm, @@ -14019,10 +14041,10 @@ static void verify_wm_state(struct intel_crtc *crtc, hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); if (INTEL_GEN(dev_priv) >= 11 && - hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask) + hw_enabled_slices != dev_priv->dbuf.enabled_slices) drm_err(&dev_priv->drm, "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", - dev_priv->enabled_dbuf_slices_mask, + dev_priv->dbuf.enabled_slices, hw_enabled_slices); /* planes */ @@ -14416,6 +14438,8 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) drm_calc_timestamping_constants(&crtc->base, adjusted_mode); + crtc->mode_flags = crtc_state->mode_flags; + /* * The scanline counter increments at the leading edge of hsync. * @@ -14563,20 +14587,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state) state->modeset = true; state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes); - state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes; - - if (state->active_pipe_changes) { + if (state->active_pipes != dev_priv->active_pipes) { ret = _intel_atomic_lock_global_state(state); if (ret) return ret; } - ret = intel_modeset_calc_cdclk(state); - if (ret) - return ret; - - intel_modeset_clear_plls(state); - if (IS_HASWELL(dev_priv)) return hsw_mode_set_planes_workaround(state); @@ -14653,11 +14669,10 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) /* See {hsw,vlv,ivb}_plane_ratio() */ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || - IS_IVYBRIDGE(dev_priv); + IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11); } -static int intel_atomic_check_planes(struct intel_atomic_state *state, - bool *need_cdclk_calc) +static int intel_atomic_check_planes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; @@ -14699,7 +14714,13 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state, old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); - if (hweight8(old_active_planes) == hweight8(new_active_planes)) + /* + * Not only the number of planes, but if the plane configuration had + * changed might already mean we need to recompute min CDCLK, + * because different planes might consume different amount of Dbuf bandwidth + * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor + */ + if (old_active_planes == new_active_planes) continue; ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); @@ -14707,6 +14728,21 @@ static int intel_atomic_check_planes(struct 
intel_atomic_state *state, return ret; } + return 0; +} + +static int intel_atomic_check_cdclk(struct intel_atomic_state *state, + bool *need_cdclk_calc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_cdclk_state *new_cdclk_state; + struct intel_plane_state *plane_state; + struct intel_bw_state *new_bw_state; + struct intel_plane *plane; + int min_cdclk = 0; + enum pipe pipe; + int ret; + int i; /* * active_planes bitmask has been updated, and potentially * affected planes are part of the state. We can now @@ -14718,6 +14754,30 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state, return ret; } + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + + if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) + *need_cdclk_calc = true; + + ret = dev_priv->display.bw_calc_min_cdclk(state); + if (ret) + return ret; + + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (!new_cdclk_state || !new_bw_state) + return 0; + + for_each_pipe(dev_priv, pipe) { + min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); + + /* + * Currently do this change only if we need to increase + */ + if (new_bw_state->min_cdclk > min_cdclk) + *need_cdclk_calc = true; + } + return 0; } @@ -14769,16 +14829,13 @@ static int intel_atomic_check(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); struct intel_crtc_state *old_crtc_state, *new_crtc_state; - struct intel_cdclk_state *new_cdclk_state; struct intel_crtc *crtc; int ret, i; bool any_ms = false; - /* Catch I915_MODE_FLAG_INHERITED */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (new_crtc_state->uapi.mode.private_flags != - old_crtc_state->uapi.mode.private_flags) + if (new_crtc_state->inherited != old_crtc_state->inherited) new_crtc_state->uapi.mode_changed = true; } @@ -14880,14 +14937,10 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - ret = intel_atomic_check_planes(state, &any_ms); + ret = intel_atomic_check_planes(state); if (ret) goto fail; - new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) - any_ms = true; - /* * distrust_bios_wm will force a full dbuf recomputation * but the hardware state will only get updated accordingly @@ -14908,10 +14961,6 @@ static int intel_atomic_check(struct drm_device *dev, goto fail; } - ret = intel_atomic_check_crtcs(state); - if (ret) - goto fail; - intel_fbc_choose_crtc(dev_priv, state); ret = calc_watermark_data(state); if (ret) @@ -14921,6 +14970,22 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + ret = intel_atomic_check_cdclk(state, &any_ms); + if (ret) + goto fail; + + if (any_ms) { + ret = intel_modeset_calc_cdclk(state); + if (ret) + return ret; + + intel_modeset_clear_plls(state); + } + + ret = intel_atomic_check_crtcs(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state) && @@ -14951,8 +15016,24 @@ static int intel_atomic_check(struct drm_device *dev, static int intel_atomic_prepare_commit(struct intel_atomic_state *state) { - return drm_atomic_helper_prepare_planes(state->base.dev, - &state->base); + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i, ret; + + ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); + if (ret < 0) + 
return ret; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + bool mode_changed = needs_modeset(crtc_state); + + if (mode_changed || crtc_state->update_pipe || + crtc_state->uapi.color_mgmt_changed) { + intel_dsb_prepare(crtc_state); + } + } + + return 0; } u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) @@ -15124,7 +15205,7 @@ static void intel_update_crtc(struct intel_atomic_state *state, * of enabling them on the CRTC's first fastset. */ if (new_crtc_state->update_pipe && !modeset && - old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) + old_crtc_state->inherited) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } @@ -15216,29 +15297,6 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state) } } -static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask; - u8 required_slices = state->enabled_dbuf_slices_mask; - u8 slices_union = hw_enabled_slices | required_slices; - - /* If 2nd DBuf slice required, enable it here */ - if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices) - icl_dbuf_slices_update(dev_priv, slices_union); -} - -static void icl_dbuf_slice_post_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask; - u8 required_slices = state->enabled_dbuf_slices_mask; - - /* If 2nd DBuf slice is no more required disable it */ - if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices) - icl_dbuf_slices_update(dev_priv, required_slices); -} - static void skl_commit_modeset_enables(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); @@ -15405,15 +15463,27 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat &wait_reset); } +static void intel_cleanup_dsbs(struct intel_atomic_state *state) +{ + struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) + intel_dsb_cleanup(old_crtc_state); +} + static void intel_atomic_cleanup_work(struct work_struct *work) { - struct drm_atomic_state *state = - container_of(work, struct drm_atomic_state, commit_work); - struct drm_i915_private *i915 = to_i915(state->dev); + struct intel_atomic_state *state = + container_of(work, struct intel_atomic_state, base.commit_work); + struct drm_i915_private *i915 = to_i915(state->base.dev); - drm_atomic_helper_cleanup_planes(&i915->drm, state); - drm_atomic_helper_commit_cleanup_done(state); - drm_atomic_state_put(state); + intel_cleanup_dsbs(state); + drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); + drm_atomic_helper_commit_cleanup_done(&state->base); + drm_atomic_state_put(&state->base); intel_atomic_helper_free_state(i915); } @@ -15479,9 +15549,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (state->modeset) intel_encoders_update_prepare(state); - /* Enable all new slices, we might need */ - if (state->modeset) - icl_dbuf_slice_pre_update(state); + intel_dbuf_pre_plane_update(state); /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.commit_modeset_enables(state); @@ -15536,9 +15604,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) dev_priv->display.optimize_watermarks(state, crtc); } - /* Disable all slices, we don't need */ - if (state->modeset) - icl_dbuf_slice_post_update(state); + intel_dbuf_post_plane_update(state); for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); @@ -15547,6 +15613,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) modeset_put_power_domains(dev_priv, put_domains[i]); intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); + + /* + * DSB cleanup is done in cleanup_work aligning with framebuffer + * cleanup. So copy and reset the dsb structure to sync with + * commit_done and later do dsb cleanup in cleanup_work. + */ + old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); } /* Underruns don't always raise interrupts, so check manually */ @@ -15696,8 +15769,15 @@ static int intel_atomic_commit(struct drm_device *dev, intel_atomic_swap_global_state(state); if (ret) { + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int i; + i915_sw_fence_commit(&state->commit_ready); + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + intel_dsb_cleanup(new_crtc_state); + drm_atomic_helper_cleanup_planes(dev, &state->base); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; @@ -16417,6 +16497,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; drm_plane_create_zpos_immutable_property(&cursor->base, zpos); + if (INTEL_GEN(dev_priv) >= 12) + drm_plane_enable_fb_damage_clips(&cursor->base); + drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); return cursor; @@ -16757,7 +16840,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return; - if (INTEL_GEN(dev_priv) >= 12) { + if (IS_ROCKETLAKE(dev_priv)) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_D); /* DDI TC1 */ + intel_ddi_init(dev_priv, PORT_E); /* DDI TC2 */ + } else if (INTEL_GEN(dev_priv) >= 12) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_D); @@ -17432,23 +17520,35 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) { struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state); + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->dbuf.obj.state); intel_update_cdclk(i915); intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK"); cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; + + dbuf_state->enabled_slices = i915->dbuf.enabled_slices; } static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) { struct drm_plane *plane; - struct drm_crtc *crtc; + struct intel_crtc *crtc; - drm_for_each_crtc(crtc, state->dev) { - struct drm_crtc_state *crtc_state; + for_each_intel_crtc(state->dev, crtc) { + struct intel_crtc_state *crtc_state; - crtc_state = drm_atomic_get_crtc_state(state, crtc); + crtc_state = intel_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); + + if (crtc_state->hw.active) { + /* + * Preserve the inherited flag to avoid + * taking the full modeset path. 
+ */ + crtc_state->inherited = true; + } } drm_for_each_plane(plane, state->dev) { @@ -17590,6 +17690,15 @@ retry: } if (crtc_state->hw.active) { + /* + * We've not yet detected sink capabilities + * (audio,infoframes,etc.) and thus we don't want to + * force a full state recomputation yet. We want that to + * happen only for the first real commit from userspace. + * So preserve the inherited flag for the time being. + */ + crtc_state->inherited = true; + ret = drm_atomic_add_affected_planes(state, &crtc->base); if (ret) goto out; @@ -17677,7 +17786,8 @@ static void intel_mode_config_init(struct drm_i915_private *i915) if (IS_I845G(i915) || IS_I865G(i915)) { mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; mode_config->cursor_height = 1023; - } else if (IS_GEN(i915, 2)) { + } else if (IS_I830(i915) || IS_I85X(i915) || + IS_I915G(i915) || IS_I915GM(i915)) { mode_config->cursor_width = 64; mode_config->cursor_height = 64; } else { @@ -17723,6 +17833,10 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) if (ret) return ret; + ret = intel_dbuf_init(i915); + if (ret) + return ret; + ret = intel_bw_init(i915); if (ret) return ret; @@ -18239,6 +18353,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_cdclk_state *cdclk_state = to_intel_cdclk_state(dev_priv->cdclk.obj.state); + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(dev_priv->dbuf.obj.state); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -18269,7 +18385,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) enableddisabled(crtc_state->hw.active)); } - dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes; + dev_priv->active_pipes = cdclk_state->active_pipes = + dbuf_state->active_pipes = active_pipes; readout_plane_state(dev_priv); @@ -18360,7 +18477,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) * set a flag to indicate that a full recalculation is * needed on the next commit. 
*/ - mode->private_flags = I915_MODE_FLAG_INHERITED; + crtc_state->inherited = true; intel_crtc_compute_pixel_rate(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 3a06f72c9859..f68007ff8a13 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -187,6 +187,13 @@ enum plane_id { for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ for_each_if((__crtc)->plane_ids_mask & BIT(__p)) +#define for_each_dbuf_slice_in_mask(__slice, __mask) \ + for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \ + for_each_if((BIT(__slice)) & (__mask)) + +#define for_each_dbuf_slice(__slice) \ + for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1) + enum port { PORT_NONE = -1, diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 70525623bcdf..d1cb48b3f462 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -125,7 +125,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", - yesno(i915_modparams.enable_ips)); + yesno(dev_priv->params.enable_ips)); if (INTEL_GEN(dev_priv) >= 8) { seq_puts(m, "Currently: unknown\n"); @@ -1099,10 +1099,10 @@ static void drrs_status_per_crtc(struct seq_file *m, seq_puts(m, "\n\t\t"); if (drrs->refresh_rate_type == DRRS_HIGH_RR) { seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); - vrefresh = panel->fixed_mode->vrefresh; + vrefresh = drm_mode_vrefresh(panel->fixed_mode); } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); - vrefresh = panel->downclock_mode->vrefresh; + vrefresh = drm_mode_vrefresh(panel->downclock_mode); } else { seq_printf(m, "DRRS_State: Unknown(%d)\n", drrs->refresh_rate_type); @@ -2218,7 +2218,8 @@ int intel_connector_debugfs_add(struct drm_connector *connector) } if (INTEL_GEN(dev_priv) >= 10 && - (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && + !to_intel_connector(connector)->mst_port) || connector->connector_type == DRM_MODE_CONNECTOR_eDP)) debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root, connector, &i915_dsc_fec_support_fops); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 49998906cc61..8a277dfbc070 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1161,7 +1161,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) { u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); - u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask; + u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; drm_WARN(&dev_priv->drm, hw_enabled_dbuf_slices != enabled_dbuf_slices, @@ -1943,22 +1943,29 @@ static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) static bool assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) { - return !WARN_ON(power_domains->async_put_domains[0] & - power_domains->async_put_domains[1]); + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, 
+ power_domains); + return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & + power_domains->async_put_domains[1]); } static bool __async_put_domains_state_ok(struct i915_power_domains *power_domains) { + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); enum intel_display_power_domain domain; bool err = false; err |= !assert_async_put_domain_masks_disjoint(power_domains); - err |= WARN_ON(!!power_domains->async_put_wakeref != - !!__async_put_domains_mask(power_domains)); + err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != + !!__async_put_domains_mask(power_domains)); for_each_power_domain(domain, __async_put_domains_mask(power_domains)) - err |= WARN_ON(power_domains->domain_use_count[domain] != 1); + err |= drm_WARN_ON(&i915->drm, + power_domains->domain_use_count[domain] != 1); return !err; } @@ -2200,11 +2207,14 @@ static void queue_async_put_domains_work(struct i915_power_domains *power_domains, intel_wakeref_t wakeref) { - WARN_ON(power_domains->async_put_wakeref); + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); + drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); power_domains->async_put_wakeref = wakeref; - WARN_ON(!queue_delayed_work(system_unbound_wq, - &power_domains->async_put_work, - msecs_to_jiffies(100))); + drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, + &power_domains->async_put_work, + msecs_to_jiffies(100))); } static void @@ -2913,6 +2923,53 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) +#define RKL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define RKL_PW_3_POWER_DOMAINS ( \ + RKL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +/* + * There is no PW_2/PG_2 on RKL. 
+ * + * RKL PW_1/PG_1 domains (under HW/DMC control): + * - DBUF function (note: registers are in PW0) + * - PIPE_A and its planes and VDSC/joining, except VGA + * - transcoder A + * - DDI_A and DDI_B + * - FBC + * + * RKL PW_0/PG_0 domains (under HW/DMC control): + * - PCI + * - clocks except port PLL + * - shared functions: + * * interrupts except pipe interrupts + * * MBus except PIPE_MBUS_DBOX_CTL + * * DBUF registers + * - central power except FBC + * - top-level GTC (DDI-level GTC is in the well associated with the DDI) + */ + +#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + RKL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, @@ -4283,6 +4340,140 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }; +static const struct i915_power_well_desc rkl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_DC_OFF, + }, + { + .name = "power well 3", + .domains = RKL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "power well 4", + .domains = RKL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + } + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, + { + .name = "DDI D TC1 IO", + .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + }, + }, + { + .name = "DDI E TC2 IO", + .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, + }, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX D TC1", + .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, + .ops = 
&icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + }, + }, + { + .name = "AUX E TC2", + .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, + .ops = &icl_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + }, + }, +}; + static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -4322,7 +4513,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, mask = 0; } - if (!i915_modparams.disable_power_well) + if (!dev_priv->params.disable_power_well) max_dc = 0; if (enable_dc >= 0 && enable_dc <= max_dc) { @@ -4365,6 +4556,9 @@ __set_power_wells(struct i915_power_domains *power_domains, const struct i915_power_well_desc *power_well_descs, int power_well_count) { + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); u64 power_well_ids = 0; int i; @@ -4384,8 +4578,8 @@ __set_power_wells(struct i915_power_domains *power_domains, if (id == DISP_PW_ID_NONE) continue; - WARN_ON(id >= sizeof(power_well_ids) * 8); - WARN_ON(power_well_ids & BIT_ULL(id)); + drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); + drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); power_well_ids |= BIT_ULL(id); } @@ -4408,11 +4602,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) struct i915_power_domains *power_domains = &dev_priv->power_domains; int err; - i915_modparams.disable_power_well = + dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, - i915_modparams.disable_power_well); + dev_priv->params.disable_power_well); dev_priv->csr.allowed_dc_mask = - get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); + get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); dev_priv->csr.target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); @@ -4428,7 +4622,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. */ - if (IS_GEN(dev_priv, 12)) { + if (IS_ROCKETLAKE(dev_priv)) { + err = set_power_wells(power_domains, rkl_power_wells); + } else if (IS_GEN(dev_priv, 12)) { err = set_power_wells(power_domains, tgl_power_wells); } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); @@ -4491,45 +4687,38 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } -static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, - i915_reg_t reg, bool enable) +static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, + enum dbuf_slice slice, bool enable) { - u32 val, status; + i915_reg_t reg = DBUF_CTL_S(slice); + bool state; + u32 val; val = intel_de_read(dev_priv, reg); - val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); + if (enable) + val |= DBUF_POWER_REQUEST; + else + val &= ~DBUF_POWER_REQUEST; intel_de_write(dev_priv, reg, val); intel_de_posting_read(dev_priv, reg); udelay(10); - status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; - if ((enable && !status) || (!enable && status)) { - drm_err(&dev_priv->drm, "DBus power %s timeout!\n", - enable ? 
"enable" : "disable"); - return false; - } - return true; + state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; + drm_WARN(&dev_priv->drm, enable != state, + "DBuf slice %d power %s timeout!\n", + slice, enable ? "enable" : "disable"); } -static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) +void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, + u8 req_slices) { - icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1)); -} - -static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) -{ - icl_dbuf_slices_update(dev_priv, 0); -} - -void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, - u8 req_slices) -{ - int i; - int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; + int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; struct i915_power_domains *power_domains = &dev_priv->power_domains; + enum dbuf_slice slice; - drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices, - "Invalid number of dbuf slices requested\n"); + drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1), + "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n", + req_slices, num_slices); drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", req_slices); @@ -4543,36 +4732,36 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, */ mutex_lock(&power_domains->lock); - for (i = 0; i < max_slices; i++) { - intel_dbuf_slice_set(dev_priv, - DBUF_CTL_S(i), - (req_slices & BIT(i)) != 0); - } + for (slice = DBUF_S1; slice < num_slices; slice++) + gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); - dev_priv->enabled_dbuf_slices_mask = req_slices; + dev_priv->dbuf.enabled_slices = req_slices; mutex_unlock(&power_domains->lock); } -static void icl_dbuf_enable(struct drm_i915_private *dev_priv) +static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) { - skl_ddb_get_hw_state(dev_priv); + dev_priv->dbuf.enabled_slices = + intel_enabled_dbuf_slices_mask(dev_priv); + /* * Just power up at least 1 slice, we will * figure out later which slices we have and what we need. */ - icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask | - BIT(DBUF_S1)); + gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | + dev_priv->dbuf.enabled_slices); } -static void icl_dbuf_disable(struct drm_i915_private *dev_priv) +static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) { - icl_dbuf_slices_update(dev_priv, 0); + gen9_dbuf_slices_update(dev_priv, 0); } static void icl_mbus_init(struct drm_i915_private *dev_priv) { - u32 mask, val; + unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; + u32 mask, val, i; mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | MBUS_ABOX_BT_CREDIT_POOL2_MASK | @@ -4583,11 +4772,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) MBUS_ABOX_B_CREDIT(1) | MBUS_ABOX_BW_CREDIT(1); - intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val); - if (INTEL_GEN(dev_priv) >= 12) { - intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val); - intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val); - } + /* + * gen12 platforms that use abox1 and abox2 for pixel data reads still + * expect us to program the abox_ctl0 register as well, even though + * we don't have to program other instance-0 registers like BW_BUDDY. 
+ */ + if (IS_GEN(dev_priv, 12)) + abox_regs |= BIT(0); + + for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) + intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); } static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) @@ -5066,7 +5260,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) enum intel_dram_type type = dev_priv->dram_info.type; u8 num_channels = dev_priv->dram_info.num_channels; const struct buddy_page_mask *table; - int i; + unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; + int config, i; if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0)) /* Wa_1409767108: tgl */ @@ -5074,29 +5269,27 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) else table = tgl_buddy_page_masks; - for (i = 0; table[i].page_mask != 0; i++) - if (table[i].num_channels == num_channels && - table[i].type == type) + for (config = 0; table[config].page_mask != 0; config++) + if (table[config].num_channels == num_channels && + table[config].type == type) break; - if (table[i].page_mask == 0) { + if (table[config].page_mask == 0) { drm_dbg(&dev_priv->drm, "Unknown memory configuration; disabling address buddy logic.\n"); - intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE); - intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE); + for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) + intel_de_write(dev_priv, BW_BUDDY_CTL(i), + BW_BUDDY_DISABLE); } else { - intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK, - table[i].page_mask); - intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK, - table[i].page_mask); - - /* Wa_22010178259:tgl */ - intel_de_rmw(dev_priv, BW_BUDDY1_CTL, - BW_BUDDY_TLB_REQ_TIMER_MASK, - REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8)); - intel_de_rmw(dev_priv, BW_BUDDY2_CTL, - BW_BUDDY_TLB_REQ_TIMER_MASK, - REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8)); + for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) { + intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i), + table[config].page_mask); + + /* Wa_22010178259:tgl,rkl */ + intel_de_rmw(dev_priv, BW_BUDDY_CTL(i), + BW_BUDDY_TLB_REQ_TIMER_MASK, + BW_BUDDY_TLB_REQ_TIMER(0x8)); + } } } @@ -5105,6 +5298,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; + u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -5127,7 +5321,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_cdclk_init_hw(dev_priv); /* 5. Enable DBUF. */ - icl_dbuf_enable(dev_priv); + gen9_dbuf_enable(dev_priv); /* 6. Setup MBUS. */ icl_mbus_init(dev_priv); @@ -5138,6 +5332,13 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); + + /* Wa_14011508470 */ + if (IS_GEN(dev_priv, 12)) { + val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | + DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; + intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val); + } } static void icl_display_core_uninit(struct drm_i915_private *dev_priv) @@ -5150,7 +5351,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) /* 1. Disable all display engine functions -> aready done */ /* 2. Disable DBUF */ - icl_dbuf_disable(dev_priv); + gen9_dbuf_disable(dev_priv); /* 3. 
Disable CD clock */ intel_cdclk_uninit_hw(dev_priv); @@ -5375,7 +5576,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) intel_display_power_get(i915, POWER_DOMAIN_INIT); /* Disable power support if the user asked so. */ - if (!i915_modparams.disable_power_well) + if (!i915->params.disable_power_well) intel_display_power_get(i915, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(i915); @@ -5399,7 +5600,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915) fetch_and_zero(&i915->power_domains.wakeref); /* Remove the refcount we took to keep power well support disabled. */ - if (!i915_modparams.disable_power_well) + if (!i915->params.disable_power_well) intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); intel_display_power_flush_work_sync(i915); @@ -5488,7 +5689,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * Even if power well support was disabled we still want to disable * power wells if power domains must be deinitialized for suspend. */ - if (!i915_modparams.disable_power_well) + if (!i915->params.disable_power_well) intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); intel_display_power_flush_work(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 6c917699293b..54c20c76057e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -314,15 +314,16 @@ intel_display_power_put_async(struct drm_i915_private *i915, enum dbuf_slice { DBUF_S1, DBUF_S2, + I915_MAX_DBUF_SLICES }; +void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, + u8 req_slices); + #define with_intel_display_power(i915, domain, wf) \ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) -void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, - u8 req_slices); - void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 2bf3d4cb4ea9..4b0aaa3081c9 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -479,16 +479,6 @@ struct intel_atomic_state { bool dpll_set, modeset; - /* - * Does this transaction change the pipes that are active? This mask - * tracks which CRTC's have changed their active state at the end of - * the transaction (not counting the temporary disable during modesets). 
- * This mask should only be non-zero when intel_state->modeset is true, - * but the converse is not necessarily true; simply changing a mode may - * not flip the final active status of any CRTC's - */ - u8 active_pipe_changes; - u8 active_pipes; struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; @@ -506,9 +496,6 @@ struct intel_atomic_state { */ bool global_state_changed; - /* Number of enabled DBuf slices */ - u8 enabled_dbuf_slices_mask; - struct i915_sw_fence commit_ready; struct llist_node freed; @@ -643,8 +630,7 @@ struct intel_crtc_scaler_state { int scaler_id; }; -/* drm_mode->private_flags */ -#define I915_MODE_FLAG_INHERITED (1<<0) +/* {crtc,crtc_state}->mode_flags */ /* Flag to get scanline using frame time stamps */ #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1) /* Flag to use the scanline counter instead of the pixel counter */ @@ -841,6 +827,7 @@ struct intel_crtc_state { bool update_wm_pre, update_wm_post; /* watermarks are updated */ bool fifo_changed; /* FIFO split is changed */ bool preload_luts; + bool inherited; /* state inherited from BIOS? */ /* Pipe source size (ie. panel fitter input size) * All planes will be positioned inside this space, @@ -956,6 +943,9 @@ struct intel_crtc_state { /* Used by SDVO (and if we ever fix it, HDMI). */ unsigned pixel_multiplier; + /* I915_MODE_FLAG_* */ + u8 mode_flags; + u8 lane_count; /* @@ -1080,6 +1070,9 @@ struct intel_crtc_state { /* Only valid on TGL+ */ enum transcoder mst_master_transcoder; + + /* For DSB related info */ + struct intel_dsb *dsb; }; enum intel_pipe_crc_source { @@ -1118,6 +1111,10 @@ struct intel_crtc { */ bool active; u8 plane_ids_mask; + + /* I915_MODE_FLAG_* */ + u8 mode_flags; + unsigned long long enabled_power_domains; struct intel_overlay *overlay; @@ -1149,9 +1146,6 @@ struct intel_crtc { /* scalers available on this crtc */ int num_scalers; - /* per pipe DSB related info */ - struct intel_dsb dsb; - #ifdef CONFIG_DEBUG_FS struct intel_pipe_crc pipe_crc; #endif @@ -1373,6 +1367,9 @@ struct intel_dp { void (*set_idle_link_train)(struct intel_dp *intel_dp); void (*set_signal_levels)(struct intel_dp *intel_dp); + u8 (*preemph_max)(struct intel_dp *intel_dp); + u8 (*voltage_max)(struct intel_dp *intel_dp); + /* Displayport compliance testing */ struct intel_dp_compliance compliance; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ed9e53c373a7..c9b93c5706af 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -137,6 +137,8 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4}; * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. + * + * This function is not safe to use prior to encoder type being set. */ bool intel_dp_is_edp(struct intel_dp *intel_dp) { @@ -409,7 +411,10 @@ static int intel_dp_rate_index(const int *rates, int len, int rate) static void intel_dp_set_common_rates(struct intel_dp *intel_dp) { - WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_WARN_ON(&i915->drm, + !intel_dp->num_source_rates || !intel_dp->num_sink_rates); intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, intel_dp->num_source_rates, @@ -418,7 +423,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp) intel_dp->common_rates); /* Paranoia, there should always be something in common. 
*/ - if (WARN_ON(intel_dp->num_common_rates == 0)) { + if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { intel_dp->common_rates[0] = 162000; intel_dp->num_common_rates = 1; } @@ -465,6 +470,15 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, struct drm_i915_private *i915 = dp_to_i915(intel_dp); int index; + /* + * TODO: Enable fallback on MST links once MST link compute can handle + * the fallback params. + */ + if (intel_dp->is_mst) { + drm_err(&i915->drm, "Link Training Unsuccessful\n"); + return -1; + } + index = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates, link_rate); @@ -1555,6 +1569,7 @@ static ssize_t intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 txbuf[20], rxbuf[20]; size_t txsize, rxsize; int ret; @@ -1568,10 +1583,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; rxsize = 2; /* 0 or 1 data bytes */ - if (WARN_ON(txsize > 20)) + if (drm_WARN_ON(&i915->drm, txsize > 20)) return -E2BIG; - WARN_ON(!msg->buffer != !msg->size); + drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size); if (msg->buffer) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); @@ -1596,7 +1611,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; rxsize = msg->size + 1; - if (WARN_ON(rxsize > 20)) + if (drm_WARN_ON(&i915->drm, rxsize > 20)) return -E2BIG; ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, @@ -1871,10 +1886,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) int intel_dp_max_link_rate(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int len; len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); - if (WARN_ON(len <= 0)) + if (drm_WARN_ON(&i915->drm, len <= 0)) return 162000; return intel_dp->common_rates[len - 1]; @@ -1882,10 +1898,11 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp) int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int i = intel_dp_rate_index(intel_dp->sink_rates, intel_dp->num_sink_rates, rate); - if (WARN_ON(i < 0)) + if (drm_WARN_ON(&i915->drm, i < 0)) i = 0; return i; @@ -3984,70 +4001,24 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATU DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; } -/* These are source-specific values. 
*/ -u8 -intel_dp_voltage_max(struct intel_dp *intel_dp) +static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - enum port port = encoder->port; + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; +} - if (HAS_DDI(dev_priv)) - return intel_ddi_dp_voltage_max(encoder); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; - else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) - return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; - else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) - return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; - else - return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; +static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp) +{ + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; } -u8 -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) +static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - enum port port = encoder->port; + return DP_TRAIN_PRE_EMPH_LEVEL_2; +} - if (HAS_DDI(dev_priv)) { - return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: - return DP_TRAIN_PRE_EMPH_LEVEL_3; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: - return DP_TRAIN_PRE_EMPH_LEVEL_1; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: - default: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - } - } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: - case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: - return DP_TRAIN_PRE_EMPH_LEVEL_1; - default: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - } - } else { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: - return DP_TRAIN_PRE_EMPH_LEVEL_1; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: - default: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - } - } +static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp) +{ + return DP_TRAIN_PRE_EMPH_LEVEL_3; } static void vlv_set_signal_levels(struct intel_dp *intel_dp) @@ -4330,6 +4301,7 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set) case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: return EDP_LINK_TRAIN_400MV_3_5DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: return EDP_LINK_TRAIN_400MV_6DB_IVB; case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -4746,7 +4718,9 @@ intel_dp_sink_can_mst(struct intel_dp *intel_dp) static bool intel_dp_can_mst(struct intel_dp *intel_dp) { - return i915_modparams.enable_dp_mst && + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return i915->params.enable_dp_mst && intel_dp->can_mst && intel_dp_sink_can_mst(intel_dp); } @@ -4763,13 +4737,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", encoder->base.base.id, encoder->base.name, 
yesno(intel_dp->can_mst), yesno(sink_can_mst), - yesno(i915_modparams.enable_dp_mst)); + yesno(i915->params.enable_dp_mst)); if (!intel_dp->can_mst) return; intel_dp->is_mst = sink_can_mst && - i915_modparams.enable_dp_mst; + i915->params.enable_dp_mst; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); @@ -5595,35 +5569,46 @@ update_status: "Could not write test response to sink\n"); } -static int +/** + * intel_dp_check_mst_status - service any pending MST interrupts, check link status + * @intel_dp: Intel DP struct + * + * Read any pending MST interrupts, call MST core to handle these and ack the + * interrupts. Check if the main and AUX link state is ok. + * + * Returns: + * - %true if pending interrupts were serviced (or no interrupts were + * pending) w/o detecting an error condition. + * - %false if an error condition - like AUX failure or a loss of link - is + * detected, which needs servicing from the hotplug work. + */ +static bool intel_dp_check_mst_status(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - bool need_retrain = false; - - if (!intel_dp->is_mst) - return -EINVAL; + bool link_ok = true; - WARN_ON_ONCE(intel_dp->active_mst_links < 0); + drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); for (;;) { u8 esi[DP_DPRX_ESI_LEN] = {}; - bool bret, handled; + bool handled; int retry; - bret = intel_dp_get_sink_irq_esi(intel_dp, esi); - if (!bret) { + if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { drm_dbg_kms(&i915->drm, "failed to get ESI - device may have failed\n"); - return -EINVAL; + link_ok = false; + + break; } /* check link status - esi[10] = 0x200c */ - if (intel_dp->active_mst_links > 0 && !need_retrain && + if (intel_dp->active_mst_links > 0 && link_ok && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { drm_dbg_kms(&i915->drm, "channel EQ not ok, retraining\n"); - need_retrain = true; + link_ok = false; } drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); @@ -5643,7 +5628,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) } } - return need_retrain; + return link_ok; } static bool @@ -5966,7 +5951,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) u8 *dpcd = intel_dp->dpcd; u8 type; - if (WARN_ON(intel_dp_is_edp(intel_dp))) + if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) return connector_status_connected; if (lspcon->active) @@ -6191,7 +6176,17 @@ intel_dp_detect(struct drm_connector *connector, goto out; } - if (intel_dp->reset_link_params) { + /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ + if (INTEL_GEN(dev_priv) >= 11) + intel_dp_get_dsc_sink_cap(intel_dp); + + intel_dp_configure_mst(intel_dp); + + /* + * TODO: Reset link params when switching to MST mode, until MST + * supports link training fallback params. 
+ */ + if (intel_dp->reset_link_params || intel_dp->is_mst) { /* Initial max link lane count */ intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); @@ -6203,12 +6198,6 @@ intel_dp_detect(struct drm_connector *connector, intel_dp_print_rates(intel_dp); - /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ - if (INTEL_GEN(dev_priv) >= 11) - intel_dp_get_dsc_sink_cap(intel_dp); - - intel_dp_configure_mst(intel_dp); - if (intel_dp->is_mst) { /* * If we are in MST mode then this connector @@ -7294,35 +7283,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) } if (intel_dp->is_mst) { - switch (intel_dp_check_mst_status(intel_dp)) { - case -EINVAL: - /* - * If we were in MST mode, and device is not - * there, get out of MST mode - */ - drm_dbg_kms(&i915->drm, - "MST device may have disappeared %d vs %d\n", - intel_dp->is_mst, - intel_dp->mst_mgr.mst_state); - intel_dp->is_mst = false; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, - intel_dp->is_mst); - - return IRQ_NONE; - case 1: - return IRQ_NONE; - default: - break; - } - } - - if (!intel_dp->is_mst) { - bool handled; - - handled = intel_dp_short_pulse(intel_dp); - - if (!handled) + if (!intel_dp_check_mst_status(intel_dp)) return IRQ_NONE; + } else if (!intel_dp_short_pulse(intel_dp)) { + return IRQ_NONE; } return IRQ_HANDLED; @@ -7694,7 +7658,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == + if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == refresh_rate) index = DRRS_LOW_RR; @@ -7807,7 +7771,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp, if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) intel_dp_set_drrs_state(dev_priv, old_crtc_state, - intel_dp->attached_connector->panel.fixed_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); dev_priv->drrs.dp = NULL; mutex_unlock(&dev_priv->drrs.mutex); @@ -7840,7 +7804,7 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work) struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - intel_dp->attached_connector->panel.downclock_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); } unlock: @@ -7860,6 +7824,7 @@ unlock: void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits) { + struct intel_dp *intel_dp; struct drm_crtc *crtc; enum pipe pipe; @@ -7869,12 +7834,14 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, cancel_delayed_work(&dev_priv->drrs.work); mutex_lock(&dev_priv->drrs.mutex); - if (!dev_priv->drrs.dp) { + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { mutex_unlock(&dev_priv->drrs.mutex); return; } - crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); @@ -7883,7 +7850,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, /* invalidate means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); mutex_unlock(&dev_priv->drrs.mutex); } @@ -7903,6 +7870,7 @@ void 
intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits) { + struct intel_dp *intel_dp; struct drm_crtc *crtc; enum pipe pipe; @@ -7912,12 +7880,14 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, cancel_delayed_work(&dev_priv->drrs.work); mutex_lock(&dev_priv->drrs.mutex); - if (!dev_priv->drrs.dp) { + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { mutex_unlock(&dev_priv->drrs.mutex); return; } - crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); @@ -7926,7 +7896,7 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, /* flush means busy screen hence upclock */ if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); + drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); /* * flush also means no more activity hence schedule downclock, if all @@ -8189,8 +8159,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_encoder->base.name)) return false; - intel_dp_set_source_rates(intel_dp); - intel_dp->reset_link_params = true; intel_dp->pps_pipe = INVALID_PIPE; intel_dp->active_pipe = INVALID_PIPE; @@ -8206,28 +8174,22 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, */ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; + intel_encoder->type = INTEL_OUTPUT_EDP; + + /* eDP only on port B and/or C on vlv/chv */ + if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) && + port != PORT_B && port != PORT_C)) + return false; } else { type = DRM_MODE_CONNECTOR_DisplayPort; } + intel_dp_set_source_rates(intel_dp); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->active_pipe = vlv_active_pipe(intel_dp); - /* - * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but - * for DP the encoder type can be set by the caller to - * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. - */ - if (type == DRM_MODE_CONNECTOR_eDP) - intel_encoder->type = INTEL_OUTPUT_EDP; - - /* eDP only on port B and/or C on vlv/chv */ - if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv)) && - intel_dp_is_edp(intel_dp) && - port != PORT_B && port != PORT_C)) - return false; - drm_dbg_kms(&dev_priv->drm, "Adding %s connector on [ENCODER:%d:%s]\n", type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", @@ -8360,6 +8322,15 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, else intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels; + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || + (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { + intel_dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3; + intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3; + } else { + intel_dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2; + intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2; + } + intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 1702959ca079..0a8950f744f6 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -92,10 +92,6 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, void intel_dp_set_signal_levels(struct intel_dp *intel_dp); void intel_dp_set_idle_link_train(struct intel_dp *intel_dp); -u8 -intel_dp_voltage_max(struct intel_dp *intel_dp); -u8 -intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 0722540d64ad..acbd7eb66cbe 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -348,7 +348,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder); struct drm_i915_private *i915 = dp_to_i915(intel_dp); - if (i915_modparams.enable_dpcd_backlight == 0 || + if (i915->params.enable_dpcd_backlight == 0 || !intel_dp_aux_display_control_capable(intel_connector)) return -ENODEV; @@ -358,7 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) */ if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE && - i915_modparams.enable_dpcd_backlight != 1 && + i915->params.enable_dpcd_backlight != 1 && !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks, DP_QUIRK_FORCE_DPCD_BACKLIGHT)) { drm_info(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index e4f1843170b7..2493142a70e9 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -34,6 +34,21 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) link_status[3], link_status[4], link_status[5]); } +static u8 dp_voltage_max(u8 preemph) +{ + switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_1; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + default: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_0; + } +} + void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const u8 link_status[DP_LINK_STATUS_SIZE]) { @@ -44,23 +59,20 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp, u8 preemph_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { - u8 this_v = 
drm_dp_get_adjust_request_voltage(link_status, lane); - u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); - - if (this_v > v) - v = this_v; - if (this_p > p) - p = this_p; + v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane)); + p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane)); } - voltage_max = intel_dp_voltage_max(intel_dp); - if (v >= voltage_max) - v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; - - preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); + preemph_max = intel_dp->preemph_max(intel_dp); if (p >= preemph_max) p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + v = min(v, dp_voltage_max(p)); + + voltage_max = intel_dp->voltage_max(intel_dp); + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + for (lane = 0; lane < 4; lane++) intel_dp->train_set[lane] = v | p; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index f29e51ce489c..8273f2e07427 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -33,6 +33,7 @@ #include "intel_connector.h" #include "intel_ddi.h" #include "intel_display_types.h" +#include "intel_hotplug.h" #include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" @@ -316,6 +317,25 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, return ret; } +static void clear_act_sent(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + intel_de_write(i915, intel_dp->regs.dp_tp_status, + DP_TP_STATUS_ACT_SENT); +} + +static void wait_for_act_sent(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (intel_de_wait_for_set(i915, intel_dp->regs.dp_tp_status, + DP_TP_STATUS_ACT_SENT, 1)) + drm_err(&i915->drm, "Timed out waiting for ACT sent\n"); + + drm_dp_check_act_status(&intel_dp->mst_mgr); +} + static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -369,6 +389,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, drm_dp_update_payload_part2(&intel_dp->mst_mgr); + clear_act_sent(intel_dp); + val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; @@ -376,11 +398,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val); - if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, - DP_TP_STATUS_ACT_SENT, 1)) - drm_err(&dev_priv->drm, - "Timed out waiting for ACT sent when disabling\n"); - drm_dp_check_act_status(&intel_dp->mst_mgr); + wait_for_act_sent(intel_dp); drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); @@ -451,7 +469,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; - u32 temp; bool first_mst_stream; /* MST encoders are bound to a crtc, not to a connector, @@ -484,8 +501,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, drm_err(&dev_priv->drm, "failed to allocate vcpi\n"); intel_dp->active_mst_links++; - temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status); - intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); @@ -513,19 +528,25 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, 
struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); + clear_act_sent(intel_dp); + intel_ddi_enable_transcoder_func(encoder, pipe_config); + val = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); + val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder), + val); + drm_dbg_kms(&dev_priv->drm, "active links %d\n", intel_dp->active_mst_links); - if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, - DP_TP_STATUS_ACT_SENT, 1)) - drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n"); - - drm_dp_check_act_status(&intel_dp->mst_mgr); + wait_for_act_sent(intel_dp); drm_dp_update_payload_part2(&intel_dp->mst_mgr); @@ -773,8 +794,17 @@ err: return NULL; } +static void +intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + + intel_hpd_trigger_irq(dp_to_dig_port(intel_dp)); +} + static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .poll_hpd_irq = intel_dp_mst_poll_hpd_irq, }; static struct intel_dp_mst_encoder * diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index b45185b80bec..aeb6ee395cce 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2934,6 +2934,15 @@ static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = { static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { .dco_integer = 0x43, .dco_fraction = 0x4000, /* the following params are unused */ +}; + +/* + * Display WA #22010492432: tgl + * Divide the nominal .dco_fraction value by 2. 
+ */ +static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = { + .dco_integer = 0x54, .dco_fraction = 0x1800, + /* the following params are unused */ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0, }; @@ -2970,12 +2979,14 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, MISSING_CASE(dev_priv->dpll.ref_clks.nssc); /* fall-through */ case 19200: - case 38400: *pll_params = tgl_tbt_pll_19_2MHz_values; break; case 24000: *pll_params = tgl_tbt_pll_24MHz_values; break; + case 38400: + *pll_params = tgl_tbt_pll_38_4MHz_values; + break; } } else { switch (dev_priv->dpll.ref_clks.nssc) { @@ -3038,49 +3049,26 @@ static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, icl_wrpll_ref_clock(i915)); } -static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder, +static void icl_calc_dpll_state(struct drm_i915_private *i915, + const struct skl_wrpll_params *pll_params, struct intel_dpll_hw_state *pll_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 cfgcr0, cfgcr1; - struct skl_wrpll_params pll_params = { 0 }; - bool ret; - - if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, - encoder->port))) - ret = icl_calc_tbt_pll(crtc_state, &pll_params); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - ret = icl_calc_wrpll(crtc_state, &pll_params); - else - ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); - - if (!ret) - return false; + memset(pll_state, 0, sizeof(*pll_state)); - cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) | - pll_params.dco_integer; + pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) | + pll_params->dco_integer; - cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) | - DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) | - DPLL_CFGCR1_KDIV(pll_params.kdiv) | - DPLL_CFGCR1_PDIV(pll_params.pdiv); + pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) | + DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) | + DPLL_CFGCR1_KDIV(pll_params->kdiv) | + DPLL_CFGCR1_PDIV(pll_params->pdiv); - if (INTEL_GEN(dev_priv) >= 12) - cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; + if (INTEL_GEN(i915) >= 12) + pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; else - cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; - - memset(pll_state, 0, sizeof(*pll_state)); - - pll_state->cfgcr0 = cfgcr0; - pll_state->cfgcr1 = cfgcr1; - - return true; + pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; } - static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id) { return id - DPLL_ID_ICL_MGPLL1; @@ -3493,19 +3481,29 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct skl_wrpll_params pll_params = { }; struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum port port = encoder->port; unsigned long dpll_mask; + int ret; - if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + ret = icl_calc_wrpll(crtc_state, &pll_params); + else + ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); + + if (!ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate combo PHY PLL state.\n"); return false; } + icl_calc_dpll_state(dev_priv, &pll_params, 
&port_dpll->hw_state); + if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) dpll_mask = BIT(DPLL_ID_EHL_DPLL4) | @@ -3539,16 +3537,19 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct skl_wrpll_params pll_params = { }; struct icl_port_dpll *port_dpll; enum intel_dpll_id dpll_id; port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; - if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { + if (!icl_calc_tbt_pll(crtc_state, &pll_params)) { drm_dbg_kms(&dev_priv->drm, "Could not calculate TBT PLL state.\n"); return false; } + icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); + port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, BIT(DPLL_ID_ICL_TBTPLL)); diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 29fec6a92d17..566fa72427b3 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -34,152 +34,52 @@ #define DSB_BYTE_EN_SHIFT 20 #define DSB_REG_VALUE_MASK 0xfffff -static bool is_dsb_busy(struct intel_dsb *dsb) +static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe, + enum dsb_id id) { - struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); + return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id)); } -static bool intel_dsb_enable_engine(struct intel_dsb *dsb) +static bool intel_dsb_enable_engine(struct drm_i915_private *i915, + enum pipe pipe, enum dsb_id id) { - struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; u32 dsb_ctrl; - dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); + dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id)); if (DSB_STATUS & dsb_ctrl) { - drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n"); + drm_dbg_kms(&i915->drm, "DSB engine is busy.\n"); return false; } dsb_ctrl |= DSB_ENABLE; - intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl); + intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl); - intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id)); + intel_de_posting_read(i915, DSB_CTRL(pipe, id)); return true; } -static bool intel_dsb_disable_engine(struct intel_dsb *dsb) +static bool intel_dsb_disable_engine(struct drm_i915_private *i915, + enum pipe pipe, enum dsb_id id) { - struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; u32 dsb_ctrl; - dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); + dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id)); if (DSB_STATUS & dsb_ctrl) { - drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n"); + drm_dbg_kms(&i915->drm, "DSB engine is busy.\n"); return false; } dsb_ctrl &= ~DSB_ENABLE; - intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl); + intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl); - intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id)); + intel_de_posting_read(i915, DSB_CTRL(pipe, id)); return true; } /** - * intel_dsb_get() - Allocate DSB context and return a DSB instance. - * @crtc: intel_crtc structure to get pipe info. 
- * - * This function provides handle of a DSB instance, for the further DSB - * operations. - * - * Returns: address of Intel_dsb instance requested for. - * Failure: Returns the same DSB instance, but without a command buffer. - */ - -struct intel_dsb * -intel_dsb_get(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *i915 = to_i915(dev); - struct intel_dsb *dsb = &crtc->dsb; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u32 *buf; - intel_wakeref_t wakeref; - - if (!HAS_DSB(i915)) - return dsb; - - if (dsb->refcount++ != 0) - return dsb; - - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE); - if (IS_ERR(obj)) { - drm_err(&i915->drm, "Gem object creation failed\n"); - goto out; - } - - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) { - drm_err(&i915->drm, "Vma creation failed\n"); - i915_gem_object_put(obj); - goto out; - } - - buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); - if (IS_ERR(buf)) { - drm_err(&i915->drm, "Command buffer creation failed\n"); - goto out; - } - - dsb->id = DSB1; - dsb->vma = vma; - dsb->cmd_buf = buf; - -out: - /* - * On error dsb->cmd_buf will continue to be NULL, making the writes - * pass-through. Leave the dangling ref to be removed later by the - * corresponding intel_dsb_put(): the important error message will - * already be logged above. - */ - - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - - return dsb; -} - -/** - * intel_dsb_put() - To destroy DSB context. - * @dsb: intel_dsb structure. - * - * This function destroys the DSB context allocated by a dsb_get(), by - * unpinning and releasing the VMA object associated with it. - */ - -void intel_dsb_put(struct intel_dsb *dsb) -{ - struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - - if (!HAS_DSB(i915)) - return; - - if (drm_WARN_ON(&i915->drm, dsb->refcount == 0)) - return; - - if (--dsb->refcount == 0) { - i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP); - dsb->cmd_buf = NULL; - dsb->free_pos = 0; - dsb->ins_start_offset = 0; - } -} - -/** * intel_dsb_indexed_reg_write() -Write to the DSB context for auto * increment register. - * @dsb: intel_dsb structure. + * @crtc_state: intel_crtc_state structure * @reg: register address. * @val: value. * @@ -189,19 +89,20 @@ void intel_dsb_put(struct intel_dsb *dsb) * is done through mmio write. */ -void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, - u32 val) +void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state, + i915_reg_t reg, u32 val) { - struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct intel_dsb *dsb = crtc_state->dsb; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 *buf = dsb->cmd_buf; + u32 *buf; u32 reg_val; - if (!buf) { + if (!dsb) { intel_de_write(dev_priv, reg, val); return; } - + buf = dsb->cmd_buf; if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) { drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n"); return; @@ -256,7 +157,7 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, /** * intel_dsb_reg_write() -Write to the DSB context for normal * register. - * @dsb: intel_dsb structure. + * @crtc_state: intel_crtc_state structure * @reg: register address. * @val: value. 
  *
@@ -265,17 +166,21 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
  * and rest all erroneous condition register programming is done
  * through mmio write.
  */
-void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+			 i915_reg_t reg, u32 val)
 {
-	struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 *buf = dsb->cmd_buf;
+	struct intel_dsb *dsb;
+	u32 *buf;
 
-	if (!buf) {
+	dsb = crtc_state->dsb;
+	if (!dsb) {
 		intel_de_write(dev_priv, reg, val);
 		return;
 	}
 
+	buf = dsb->cmd_buf;
 	if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
 		drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
 		return;
@@ -290,26 +195,27 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
 
 /**
  * intel_dsb_commit() - Trigger workload execution of DSB.
- * @dsb: intel_dsb structure.
+ * @crtc_state: intel_crtc_state structure
  *
  * This function is used to do actual write to hardware using DSB.
  * On errors, fall back to MMIO. Also this function help to reset the context.
  */
-void intel_dsb_commit(struct intel_dsb *dsb)
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+	struct intel_dsb *dsb = crtc_state->dsb;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 	u32 tail;
 
-	if (!dsb->free_pos)
+	if (!(dsb && dsb->free_pos))
 		return;
 
-	if (!intel_dsb_enable_engine(dsb))
+	if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
 		goto reset;
 
-	if (is_dsb_busy(dsb)) {
+	if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
 		drm_err(&dev_priv->drm,
 			"HEAD_PTR write failed - dsb engine is busy.\n");
 		goto reset;
@@ -322,7 +228,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
 	memset(&dsb->cmd_buf[dsb->free_pos], 0,
 	       (tail - dsb->free_pos * 4));
 
-	if (is_dsb_busy(dsb)) {
+	if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
 		drm_err(&dev_priv->drm,
 			"TAIL_PTR write failed - dsb engine is busy.\n");
 		goto reset;
@@ -332,7 +238,7 @@ void intel_dsb_commit(struct intel_dsb *dsb)
 		       i915_ggtt_offset(dsb->vma), tail);
 	intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
 		       i915_ggtt_offset(dsb->vma) + tail);
-	if (wait_for(!is_dsb_busy(dsb), 1)) {
+	if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
 		drm_err(&dev_priv->drm,
 			"Timed out waiting for DSB workload completion.\n");
 		goto reset;
@@ -341,5 +247,83 @@ reset:
 	dsb->free_pos = 0;
 	dsb->ins_start_offset = 0;
-	intel_dsb_disable_engine(dsb);
+	intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+}
+
+/**
+ * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
+ * @crtc_state: intel_crtc_state structure to prepare the associated dsb instance.
+ *
+ * This function prepares the command buffer which is used to store DSB
+ * instructions with data.
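The kernel-doc above, together with the prepare/cleanup helpers added below and the new declarations in intel_dsb.h, replaces the old intel_dsb_get()/intel_dsb_put() refcounting with a command buffer owned by the crtc state. A minimal caller sketch under that assumption; example_program_regs() and its register/value pair are hypothetical, and the real call sites live in the atomic commit and color management paths outside this hunk:

static void example_program_regs(struct intel_crtc_state *crtc_state,
				 i915_reg_t reg, u32 val)
{
	/* Allocate, pin and map the command buffer; crtc_state->dsb stays
	 * NULL on platforms without DSB or on allocation failure. */
	intel_dsb_prepare(crtc_state);

	/* Batched into the DSB buffer, or an immediate MMIO write when
	 * crtc_state->dsb is NULL. */
	intel_dsb_reg_write(crtc_state, reg, val);

	/* Kick the DSB engine and wait for the batched writes to land. */
	intel_dsb_commit(crtc_state);

	/* Unpin and release the vma and free the intel_dsb. */
	intel_dsb_cleanup(crtc_state);
}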
+ */
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct intel_dsb *dsb;
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 *buf;
+	intel_wakeref_t wakeref;
+
+	if (!HAS_DSB(i915))
+		return;
+
+	dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
+	if (!dsb) {
+		drm_err(&i915->drm, "DSB object creation failed\n");
+		return;
+	}
+
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+	if (IS_ERR(obj)) {
+		drm_err(&i915->drm, "Gem object creation failed\n");
+		kfree(dsb);
+		goto out;
+	}
+
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma)) {
+		drm_err(&i915->drm, "Vma creation failed\n");
+		i915_gem_object_put(obj);
+		kfree(dsb);
+		goto out;
+	}
+
+	buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+	if (IS_ERR(buf)) {
+		drm_err(&i915->drm, "Command buffer creation failed\n");
+		i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+		kfree(dsb);
+		goto out;
+	}
+
+	dsb->id = DSB1;
+	dsb->vma = vma;
+	dsb->cmd_buf = buf;
+	dsb->free_pos = 0;
+	dsb->ins_start_offset = 0;
+	crtc_state->dsb = dsb;
+out:
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_dsb_cleanup() - To clean up the DSB context.
+ * @crtc_state: intel_crtc_state structure to clean up the associated dsb instance.
+ *
+ * This function cleans up the DSB context by unpinning and releasing
+ * the VMA object associated with it.
+ */
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
+{
+	if (!crtc_state->dsb)
+		return;
+
+	i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
+	kfree(crtc_state->dsb);
+	crtc_state->dsb = NULL;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 395ef9ce558e..654a11f24b80 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -10,7 +10,7 @@
 
 #include "i915_reg.h"
 
-struct intel_crtc;
+struct intel_crtc_state;
 struct i915_vma;
 
 enum dsb_id {
@@ -22,7 +22,6 @@ enum dsb_id {
 };
 
 struct intel_dsb {
-	long refcount;
 	enum dsb_id id;
 	u32 *cmd_buf;
 	struct i915_vma *vma;
@@ -41,12 +40,12 @@ struct intel_dsb {
 	u32 ins_start_offset;
 };
 
-struct intel_dsb *
-intel_dsb_get(struct intel_crtc *crtc);
-void intel_dsb_put(struct intel_dsb *dsb);
-void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
-void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
-				 u32 val);
-void intel_dsb_commit(struct intel_dsb *dsb);
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+			 i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+				 i915_reg_t reg, u32 val);
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
 
 #endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 412572f88b67..a2de57ede276 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -132,14 +132,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 	}
 
 	/* enable it...
*/ - fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); - fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; + fbc_ctl = FBC_CTL_INTERVAL(params->interval); fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; + fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); if (params->fence_id >= 0) - fbc_ctl |= params->fence_id; + fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); } @@ -699,6 +698,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->fb.stride = fb->pitches[0]; cache->fb.modifier = fb->modifier; + /* FBC1 compression interval: arbitrary choice of 1 second */ + cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); + cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && @@ -747,7 +749,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) return false; } - if (!i915_modparams.enable_fbc) { + if (!dev_priv->params.enable_fbc) { fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -892,6 +894,8 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->fence_id = cache->fence_id; params->fence_y_offset = cache->fence_y_offset; + params->interval = cache->interval; + params->crtc.pipe = crtc->pipe; params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; @@ -1028,7 +1032,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) fbc->flip_pending = false; - if (!i915_modparams.enable_fbc) { + if (!dev_priv->params.enable_fbc) { intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); __intel_fbc_disable(dev_priv); @@ -1101,11 +1105,19 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, if (!HAS_FBC(dev_priv)) return; + /* + * GTT tracking does not nuke the entire cfb + * so don't clear busy_bits set for some other + * reason. + */ + if (origin == ORIGIN_GTT) + return; + mutex_lock(&fbc->lock); fbc->busy_bits &= ~frontbuffer_bits; - if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + if (origin == ORIGIN_FLIP) goto out; if (!fbc->busy_bits && fbc->crtc && @@ -1377,8 +1389,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) */ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) { - if (i915_modparams.enable_fbc >= 0) - return !!i915_modparams.enable_fbc; + if (dev_priv->params.enable_fbc >= 0) + return !!dev_priv->params.enable_fbc; if (!HAS_FBC(dev_priv)) return 0; @@ -1422,20 +1434,15 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->display.has_fbc = false; - i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); + dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", - i915_modparams.enable_fbc); + dev_priv->params.enable_fbc); if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; return; } - /* This value was pulled out of someone's hat */ - if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv)) - intel_de_write(dev_priv, FBC_CONTROL, - 500 << FBC_CTL_INTERVAL_SHIFT); - /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 2cbc4619b4ce..815b054bb167 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1923,8 +1923,11 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) return false; - return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || - IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)); + return (INTEL_GEN(dev_priv) >= 10 || + IS_GEMINILAKE(dev_priv) || + IS_KABYLAKE(dev_priv) || + IS_COFFEELAKE(dev_priv) || + IS_COMETLAKE(dev_priv)); } void intel_hdcp_component_init(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 95b6d9457910..ae4ae800e908 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1540,7 +1540,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, } static -bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) +bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port) { struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_connector *connector = @@ -1563,8 +1563,7 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) == (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { - drm_err(&i915->drm, - "Ri' mismatch detected, link check failed (%x)\n", + drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n", intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); return false; @@ -1572,6 +1571,20 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) return true; } +static +bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) +{ + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + int retry; + + for (retry = 0; retry < 3; retry++) + if (intel_hdmi_hdcp_check_link_once(intel_dig_port)) + return true; + + drm_err(&i915->drm, "Link check failed\n"); + return false; +} + struct hdcp2_hdmi_msg_timeout { u8 msg_id; u16 timeout; @@ -3076,6 +3089,24 @@ static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) return ddc_pin; } +static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) +{ + enum phy phy = intel_port_to_phy(dev_priv, port); + + WARN_ON(port == PORT_C); + + /* + * Pin mapping for RKL depends on which PCH is present. With TGP, the + * final two outputs use type-c pins, even though they're actually + * combo outputs. With CMP, the traditional DDI A-D pins are used for + * all outputs. 
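As a worked example of the mapping described above (illustrative only; the pin values assume the usual GMBUS numbering in which GMBUS_PIN_1_BXT is 1 and GMBUS_PIN_9_TC1_ICP is 9):

/* RKL + TGP: PHY_C -> GMBUS_PIN_9_TC1_ICP + 0 = pin 9 (TC1),
 *            PHY_D -> GMBUS_PIN_9_TC1_ICP + 1 = pin 10 (TC2).
 * RKL + CMP: PHY_A..PHY_D -> GMBUS_PIN_1_BXT + phy = pins 1..4.
 */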
+	 */
+	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+		return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+	return GMBUS_PIN_1_BXT + phy;
+}
+
 static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
 			      enum port port)
 {
@@ -3113,7 +3144,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
 		return ddc_pin;
 	}
 
-	if (HAS_PCH_MCC(dev_priv))
+	if (IS_ROCKETLAKE(dev_priv))
+		ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+	else if (HAS_PCH_MCC(dev_priv))
 		ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 		ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 4f6f560e093e..3f1d7b804a66 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -89,6 +89,15 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 {
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 
+	/*
+	 * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
+	 * based on the DDI rather than the PHY (i.e., the last two outputs
+	 * should be HPD_PORT_{D,E} rather than {C,D}). Note that this differs
+	 * from the behavior of both TGL+TGP and RKL+CMP.
+	 */
+	if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
+		return HPD_PORT_A + port - PORT_A;
+
 	switch (phy) {
 	case PHY_F:
 		return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
@@ -274,24 +283,30 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = connector->base.dev;
 	enum drm_connector_status old_status;
+	u64 old_epoch_counter;
+	bool ret = false;
 
 	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
 	old_status = connector->base.status;
+	old_epoch_counter = connector->base.epoch_counter;
 
 	connector->base.status =
 		drm_helper_probe_detect(&connector->base, NULL, false);
 
-	if (old_status == connector->base.status)
-		return INTEL_HOTPLUG_UNCHANGED;
-
-	drm_dbg_kms(&to_i915(dev)->drm,
-		    "[CONNECTOR:%d:%s] status updated from %s to %s\n",
-		    connector->base.base.id,
-		    connector->base.name,
-		    drm_get_connector_status_name(old_status),
-		    drm_get_connector_status_name(connector->base.status));
+	if (old_epoch_counter != connector->base.epoch_counter)
+		ret = true;
 
-	return INTEL_HOTPLUG_CHANGED;
+	if (ret) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
+			      connector->base.base.id,
+			      connector->base.name,
+			      drm_get_connector_status_name(old_status),
+			      drm_get_connector_status_name(connector->base.status),
+			      old_epoch_counter,
+			      connector->base.epoch_counter);
+		return INTEL_HOTPLUG_CHANGED;
+	}
+	return INTEL_HOTPLUG_UNCHANGED;
 }
 
 static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
@@ -347,6 +362,24 @@ static void i915_digport_work_func(struct work_struct *work)
 	}
 }
 
+/**
+ * intel_hpd_trigger_irq - trigger an hpd irq event for a port
+ * @dig_port: digital port
+ *
+ * Trigger an HPD interrupt event for the given port, emulating a short pulse
+ * generated by the sink, and schedule the dig port work to handle it.
+ */
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
+{
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+	spin_lock_irq(&i915->irq_lock);
+	i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+	spin_unlock_irq(&i915->irq_lock);
+
+	queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+}
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
*/ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 777b0743257e..a704d7c94d16 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -10,6 +10,7 @@ struct drm_i915_private; struct intel_connector; +struct intel_digital_port; struct intel_encoder; enum port; @@ -18,6 +19,7 @@ enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, struct intel_connector *connector); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); +void intel_hpd_trigger_irq(struct intel_digital_port *dig_port); void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 872f2a489339..1888611244db 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -784,8 +784,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) struct drm_i915_private *dev_priv = to_i915(dev); /* use the module option value if specified */ - if (i915_modparams.lvds_channel_mode > 0) - return i915_modparams.lvds_channel_mode == 2; + if (dev_priv->params.lvds_channel_mode > 0) + return dev_priv->params.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999) diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index cc6b00959586..de995362f428 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -801,7 +801,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; const struct firmware *fw = NULL; - const char *name = i915_modparams.vbt_firmware; + const char *name = dev_priv->params.vbt_firmware; int ret; if (!name || !*name) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 66711e62fa71..52b4f6193b4c 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -100,12 +100,15 @@ #define CLK_RGB24_MASK 0x0 #define CLK_RGB16_MASK 0x070307 #define CLK_RGB15_MASK 0x070707 -#define CLK_RGB8I_MASK 0xffffff +#define RGB30_TO_COLORKEY(c) \ + ((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2)) #define RGB16_TO_COLORKEY(c) \ - (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3)) + ((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3)) #define RGB15_TO_COLORKEY(c) \ - (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3)) + ((((c) & 0x7c00) << 9) | (((c) & 0x03e0) << 6) | (((c) & 0x001f) << 3)) +#define RGB8I_TO_COLORKEY(c) \ + ((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0)) /* overlay flip addr flag */ #define OFC_UPDATE 0x1 @@ -682,8 +685,8 @@ static void update_colorkey(struct intel_overlay *overlay, switch (format) { case DRM_FORMAT_C8: - key = 0; - flags |= CLK_RGB8I_MASK; + key = RGB8I_TO_COLORKEY(key); + flags |= CLK_RGB24_MASK; break; case DRM_FORMAT_XRGB1555: key = RGB15_TO_COLORKEY(key); @@ -693,6 +696,11 @@ static void update_colorkey(struct intel_overlay *overlay, key = RGB16_TO_COLORKEY(key); flags 
|= CLK_RGB16_MASK; break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + key = RGB30_TO_COLORKEY(key); + flags |= CLK_RGB24_MASK; + break; default: flags |= CLK_RGB24_MASK; break; @@ -777,9 +785,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB); if (!overlay->active) { - u32 oconfig; - - oconfig = OCONF_CC_OUT_8BIT; + const struct intel_crtc_state *crtc_state = + overlay->crtc->config; + u32 oconfig = 0; + + if (crtc_state->gamma_enable && + crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + oconfig |= OCONF_CC_OUT_8BIT; + if (crtc_state->gamma_enable) + oconfig |= OCONF_GAMMA2_ENABLE; if (IS_GEN(dev_priv, 4)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 3c5056dbf607..aaed9eb3b56c 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -521,10 +521,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); - if (i915_modparams.invert_brightness < 0) + if (dev_priv->params.invert_brightness < 0) return val; - if (i915_modparams.invert_brightness > 0 || + if (dev_priv->params.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { return panel->backlight.max - val + panel->backlight.min; } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index b7a2c102648a..611cb8d74811 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -83,7 +83,7 @@ static bool psr_global_enabled(struct drm_i915_private *i915) { switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: - return i915_modparams.enable_psr; + return i915->params.enable_psr; case I915_PSR_DEBUG_DISABLE: return false; default: @@ -426,6 +426,12 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 11) val |= EDP_PSR_TP4_TIME_0US; + if (dev_priv->params.psr_safest_params) { + val |= EDP_PSR_TP1_TIME_2500us; + val |= EDP_PSR_TP2_TP3_TIME_2500us; + goto check_tp3_sel; + } + if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) val |= EDP_PSR_TP1_TIME_0us; else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) @@ -444,6 +450,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) else val |= EDP_PSR_TP2_TP3_TIME_2500us; +check_tp3_sel: if (intel_dp_source_supports_hbr2(intel_dp) && drm_dp_tps3_supported(intel_dp->dpcd)) val |= EDP_PSR_TP1_TP3_SEL; @@ -495,18 +502,13 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val); } -static void hsw_activate_psr2(struct intel_dp *intel_dp) +static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 val; - - val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT; - - val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - val |= EDP_Y_COORDINATE_ENABLE; + u32 val = 0; - val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); + if (dev_priv->params.psr_safest_params) + return EDP_PSR2_TP2_TIME_2500us; if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) @@ -518,6 +520,39 @@ static void hsw_activate_psr2(struct 
intel_dp *intel_dp)
 	else
 		val |= EDP_PSR2_TP2_TIME_2500us;
 
+	return val;
+}
+
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 val;
+
+	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
+
+	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+		val |= EDP_Y_COORDINATE_ENABLE;
+
+	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+	val |= intel_psr2_get_tp_time(intel_dp);
+
+	if (INTEL_GEN(dev_priv) >= 12) {
+		/*
+		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
+		 * values from BSpec. To set optimal power consumption, modes
+		 * below 4k resolution need to decrease IO_BUFFER_WAKE and
+		 * FAST_WAKE, and modes above 4k resolution need to increase
+		 * IO_BUFFER_WAKE and FAST_WAKE.
+		 */
+		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
+		val |= TGL_EDP_PSR2_FAST_WAKE(7);
+	} else if (INTEL_GEN(dev_priv) >= 9) {
+		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
+		val |= EDP_PSR2_FAST_WAKE(7);
+	}
+
 	/*
 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
 	 * recommending keep this bit unset while PSR2 is enabled.
@@ -657,6 +692,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
+	if (crtc_state->crc_enabled) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+		return false;
+	}
+
 	if (INTEL_GEN(dev_priv) >= 12) {
 		psr_max_h = 5120;
 		psr_max_v = 3200;
@@ -671,14 +712,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		max_bpp = 24;
 	}
 
-	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
-			    crtc_hdisplay, crtc_vdisplay,
-			    psr_max_h, psr_max_v);
-		return false;
-	}
-
 	if (crtc_state->pipe_bpp > max_bpp) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
@@ -699,9 +732,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
-	if (crtc_state->crc_enabled) {
+	/*
+	 * Some platforms lack PSR2 HW tracking and instead require manual
+	 * tracking by software. In this case, the driver is required to track
+	 * the areas that need updates and program hardware to send selective
+	 * updates.
+	 *
+	 * So until the software tracking is implemented, PSR2 needs to be
+	 * disabled for platforms without PSR2 HW tracking.
+ */ + if (!HAS_PSR_HW_TRACKING(dev_priv)) { drm_dbg_kms(&dev_priv->drm, - "PSR2 not enabled because it would inhibit pipe CRC calculation\n"); + "No PSR2 HW tracking in the platform\n"); + return false; + } + + if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", + crtc_hdisplay, crtc_vdisplay, + psr_max_h, psr_max_v); return false; } @@ -1450,9 +1500,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv) */ dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; - if (i915_modparams.enable_psr == -1) + if (dev_priv->params.enable_psr == -1) if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) - i915_modparams.enable_psr = 0; + dev_priv->params.enable_psr = 0; /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index bc6c26818e15..773523dcd107 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -411,6 +411,7 @@ static const char *sdvo_cmd_name(u8 cmd) static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { + struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); const char *cmd_name; int i, pos = 0; char buffer[64]; @@ -431,7 +432,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, else BUF_PRINT("(%02X)", cmd); - WARN_ON(pos >= sizeof(buffer) - 1); + drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1); #undef BUF_PRINT DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); @@ -533,6 +534,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { + struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); const char *cmd_status; u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ u8 status; @@ -597,7 +599,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, BUF_PRINT(" %02X", ((u8 *)response)[i]); } - WARN_ON(pos >= sizeof(buffer) - 1); + drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1); #undef BUF_PRINT DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer); @@ -1081,6 +1083,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -1106,7 +1109,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, HDMI_QUANTIZATION_RANGE_FULL); ret = hdmi_avi_infoframe_check(frame); - if (WARN_ON(ret)) + if (drm_WARN_ON(&dev_priv->drm, ret)) return false; return true; @@ -1115,6 +1118,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; const union hdmi_infoframe *frame = &crtc_state->infoframes.avi; ssize_t len; @@ -1123,11 +1127,12 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo 
*intel_sdvo, intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0) return true; - if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI)) + if (drm_WARN_ON(&dev_priv->drm, + frame->any.type != HDMI_INFOFRAME_TYPE_AVI)) return false; len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data)); - if (WARN_ON(len < 0)) + if (drm_WARN_ON(&dev_priv->drm, len < 0)) return false; return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, @@ -1237,6 +1242,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev); unsigned dotclock = pipe_config->port_clock; struct dpll *clock = &pipe_config->dpll; @@ -1257,7 +1263,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) clock->m1 = 12; clock->m2 = 8; } else { - WARN(1, "SDVO TV clock out of range: %i\n", dotclock); + drm_WARN(&dev_priv->drm, 1, + "SDVO TV clock out of range: %i\n", dotclock); } pipe_config->clock_set = true; @@ -2293,7 +2300,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, return 0; } - WARN_ON(1); + drm_WARN_ON(connector->dev, 1); *val = 0; } else if (property == intel_sdvo_connector->top || property == intel_sdvo_connector->bottom) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 0000ec7055f7..d03860fef2d7 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -34,6 +34,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> #include <drm/drm_rect.h> @@ -333,6 +334,21 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) return 0; } +static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915) +{ + if (IS_ROCKETLAKE(i915)) + return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3); + else + return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5); +} + +bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, + enum plane_id plane_id) +{ + return INTEL_GEN(dev_priv) >= 11 && + icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id); +} + bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) { return INTEL_GEN(dev_priv) >= 11 && @@ -3003,7 +3019,7 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, if (icl_is_hdr_plane(dev_priv, plane_id)) { *num_formats = ARRAY_SIZE(icl_hdr_plane_formats); return icl_hdr_plane_formats; - } else if (icl_is_nv12_y_plane(plane_id)) { + } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) { *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats); return icl_sdr_y_plane_formats; } else { @@ -3046,6 +3062,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, struct intel_plane *plane; enum drm_plane_type plane_type; unsigned int supported_rotations; + unsigned int supported_csc; const u64 *modifiers; const u32 *formats; int num_formats; @@ -3120,9 +3137,13 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0, supported_rotations); + supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020); + drm_plane_create_color_properties(&plane->base, - BIT(DRM_COLOR_YCBCR_BT601) | - 
BIT(DRM_COLOR_YCBCR_BT709), + supported_csc, BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE), DRM_COLOR_YCBCR_BT709, @@ -3136,6 +3157,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, drm_plane_create_zpos_immutable_property(&plane->base, plane_id); + if (INTEL_GEN(dev_priv) >= 12) + drm_plane_enable_fb_damage_clips(&plane->base); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 5eeaa92420d1..cd2104ba1ca1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -32,21 +32,14 @@ struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id); -static inline bool icl_is_nv12_y_plane(enum plane_id id) -{ - /* Don't need to do a gen check, these planes are only available on gen11 */ - if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5) - return true; - - return false; -} - static inline u8 icl_hdr_plane_mask(void) { return BIT(PLANE_PRIMARY) | BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1); } +bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv, + enum plane_id plane_id); bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id); int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index b161c15baf86..5b5dc86a5737 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -360,12 +360,12 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port, } if (!icl_tc_phy_set_safe_mode(dig_port, false) && - !WARN_ON(dig_port->tc_legacy_port)) + !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) goto out_set_tbt_alt_mode; max_lanes = intel_tc_port_fia_max_lane_count(dig_port); if (dig_port->tc_legacy_port) { - WARN_ON(max_lanes != 4); + drm_WARN_ON(&i915->drm, max_lanes != 4); dig_port->tc_mode = TC_PORT_LEGACY; return; @@ -445,18 +445,20 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) static enum tc_port_mode intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) { + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 live_status_mask = tc_port_live_status_mask(dig_port); bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port); enum tc_port_mode mode; - if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port))) + if (in_safe_mode || + drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port))) return TC_PORT_TBT_ALT; mode = dig_port->tc_legacy_port ? 
TC_PORT_LEGACY : TC_PORT_DP_ALT; if (live_status_mask) { enum tc_port_mode live_mode = fls(live_status_mask) - 1; - if (!WARN_ON(live_mode == TC_PORT_TBT_ALT)) + if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT)) mode = live_mode; } @@ -505,7 +507,9 @@ static void intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, int refcount) { - WARN_ON(dig_port->tc_link_refcount); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount); dig_port->tc_link_refcount = refcount; } diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index fbe12aad7d58..777032d9697b 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1038,9 +1038,6 @@ intel_tv_mode_to_mode(struct drm_display_mode *mode, /* TV has it's own notion of sync and other mode flags, so clear them. */ mode->flags = 0; - mode->vrefresh = 0; - mode->vrefresh = drm_mode_vrefresh(mode); - snprintf(mode->name, sizeof(mode->name), "%dx%d%c (%s)", mode->hdisplay, mode->vdisplay, @@ -1161,7 +1158,7 @@ intel_tv_get_config(struct intel_encoder *encoder, /* pixel counter doesn't work on i965gm TV output */ if (IS_I965GM(dev_priv)) - adjusted_mode->private_flags |= + pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; } @@ -1331,7 +1328,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, /* pixel counter doesn't work on i965gm TV output */ if (IS_I965GM(dev_priv)) - adjusted_mode->private_flags |= + pipe_config->mode_flags |= I915_MODE_FLAG_USE_SCANLINE_COUNTER; return 0; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 95ad87d4ccb3..d145fe2bed81 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -476,13 +476,13 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases: * * - ICL eDP/DSI transcoder - * - TGL pipe A + * - Gen12+ (except RKL) pipe A * * For any other pipe, VDSC/joining uses the power well associated with * the pipe in use. Hence another reference on the pipe power domain * will suffice. (Except no VDSC/joining on ICL pipe A.) */ - if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A) + if (INTEL_GEN(i915) >= 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A) return POWER_DOMAIN_TRANSCODER_VDSC_PW2; else if (is_pipe_dsc(crtc_state)) return POWER_DOMAIN_PIPE(pipe); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index f582ab52f0b0..052e0b31a2da 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -298,7 +298,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, if (IS_GEN9_LP(dev_priv)) { /* Enable Frame time stamp based scanline reporting */ - adjusted_mode->private_flags |= + pipe_config->mode_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; /* Dual link goes to DSI transcoder A. */ @@ -1097,8 +1097,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); /* Enable Frame time stamo based scanline reporting */ - adjusted_mode->private_flags |= - I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; + pipe_config->mode_flags |= + I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; /* In terms of pixels */ adjusted_mode->crtc_hdisplay = |